diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..b2ef579cbb287bf85192e1bf7de4974c2aeed981
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,91 @@
+# Contributing
+
+The Open-Sora project welcomes constructive contributions from the community, and the team is more than willing to work on any problems you encounter to make it a better project.
+
+## Development Environment Setup
+
+To contribute to Open-Sora, we would first like to guide you through setting up a proper development environment so that you can implement your changes more easily. Install the library from source with the editable flag (`-e`, for development mode) so that your changes to the source code are reflected at runtime without re-installation.
+
+You can refer to the [Installation Section](./README.md#installation) and replace `pip install -v .` with `pip install -v -e .`.
+
+
+### Code Style
+
+We run some static checks when you commit your code changes, so please make sure all of them pass and that your coding style meets our requirements. We use pre-commit hooks to keep the code aligned with the writing standard. To set up code style checking, follow the steps below.
+
+```shell
+# these commands are executed under the Open-Sora directory
+pip install pre-commit
+pre-commit install
+```
+
+Code format checking will be executed automatically when you commit your changes.
+
+
+## Contribution Guide
+
+Follow the steps below to contribute to the main repository via a pull request. You can learn more about pull requests [here](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-pull-requests).
+
+### 1. Fork the Official Repository
+
+First, visit the [Open-Sora repository](https://github.com/hpcaitech/Open-Sora) and fork it into your own account. The `fork` button is at the top right corner of the web page, alongside buttons such as `watch` and `star`.
+
+Now, you can clone your own forked repository into your local environment.
+
+```shell
+git clone https://github.com/<your-username>/Open-Sora.git
+```
+
+### 2. Configure Git
+
+You need to set the official repository as your upstream so that you can synchronize with the latest updates in the official repository. You can learn about upstreams [here](https://www.atlassian.com/git/tutorials/git-forks-and-upstreams).
+
+Then add the official repository as the upstream.
+
+```shell
+cd Open-Sora
+git remote add upstream https://github.com/hpcaitech/Open-Sora.git
+```
+
+You can use the following command to verify that the remotes are set. You should see both `origin` and `upstream` in the output.
+
+```shell
+git remote -v
+```
+
+### 3. Synchronize with the Official Repository
+
+Before you make changes to the codebase, it is always good to fetch the latest updates from the official repository. To do so, run the commands below.
+
+```shell
+git fetch upstream
+git checkout main
+git merge upstream/main
+git push origin main
+```
+
+### 4. Create a New Branch
+
+You should not make changes to the `main` branch of your forked repository, as this might make upstream synchronization difficult. You can create a new branch with an appropriate name. In general, branch names should start with `hotfix/` or `feature/`: `hotfix/` branches are for bug fixes and `feature/` branches are for adding new features. A purely illustrative example follows the command below.
+
+
+```shell
+git checkout -b <new-branch-name>
+```
+
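+For example, a hypothetical branch fixing a typo in the README (the branch name below is illustrative, not a real branch) could be created as follows:
+
+```shell
+# create and switch to a new bug-fix branch
+git checkout -b hotfix/fix-readme-typo
+```
+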
+### 5. Implementation and Code Commit
+
+Now you can implement your code changes in the source code. Remember that you installed the library in development mode, so you do not need to uninstall and reinstall it for the changes to take effect. The code changes will be reflected in every new Python execution.
+You can then commit the changes and push them to your forked repository. The changes should be kept logical, modular, and atomic.
+
+```shell
+git add -A
+git commit -m "<your-commit-message>"
+git push -u origin <new-branch-name>
+```
+
+### 6. Open a Pull Request
+
+You can now create a pull request on the GitHub webpage of your repository. The source branch is `<new-branch-name>` of your repository, and the target branch should be `main` of `hpcaitech/Open-Sora`. After creating this pull request, you should be able to see it [here](https://github.com/hpcaitech/Open-Sora/pulls).
+
+The Open-Sora team will review your code changes and merge your code if applicable.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..7327c123dd164dc24fc361a8eaf37c62125c3aa2
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,681 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + ========================================================================= + This project is inspired by the listed projects and is subject to the following licenses: + + 1. Latte (https://github.com/Vchitect/Latte/blob/main/LICENSE) + + Copyright 2024 Latte + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + 2. 
PixArt-alpha (https://github.com/PixArt-alpha/PixArt-alpha/blob/master/LICENSE)
+
+   Copyright (C) 2024 PixArt-alpha/PixArt-alpha
+
+   This program is free software: you can redistribute it and/or modify
+   it under the terms of the GNU Affero General Public License as published
+   by the Free Software Foundation, either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU Affero General Public License for more details.
+
+   You should have received a copy of the GNU Affero General Public License
+   along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+   3. dpm-solver (https://github.com/LuChengTHU/dpm-solver/blob/main/LICENSE)
+
+   MIT License
+
+   Copyright (c) 2022 Cheng Lu
+
+   Permission is hereby granted, free of charge, to any person obtaining a copy
+   of this software and associated documentation files (the "Software"), to deal
+   in the Software without restriction, including without limitation the rights
+   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+   copies of the Software, and to permit persons to whom the Software is
+   furnished to do so, subject to the following conditions:
+
+   The above copyright notice and this permission notice shall be included in all
+   copies or substantial portions of the Software.
+
+   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+   SOFTWARE.
+
+   4. DiT (https://github.com/facebookresearch/DiT/blob/main/LICENSE.txt)
+
+   Attribution-NonCommercial 4.0 International
+
+   =======================================================================
+
+   Creative Commons Corporation ("Creative Commons") is not a law firm and
+   does not provide legal services or legal advice. Distribution of
+   Creative Commons public licenses does not create a lawyer-client or
+   other relationship. Creative Commons makes its licenses and related
+   information available on an "as-is" basis. Creative Commons gives no
+   warranties regarding its licenses, any material licensed under their
+   terms and conditions, or any related information. Creative Commons
+   disclaims all liability for damages resulting from their use to the
+   fullest extent possible.
+
+   Using Creative Commons Public Licenses
+
+   Creative Commons public licenses provide a standard set of terms and
+   conditions that creators and other rights holders may use to share
+   original works of authorship and other material subject to copyright
+   and certain other rights specified in the public license below. The
+   following considerations are for informational purposes only, are not
+   exhaustive, and do not form part of our licenses.
+
+   Considerations for licensors: Our public licenses are
+   intended for use by those authorized to give the public
+   permission to use material in ways otherwise restricted by
+   copyright and certain other rights. Our licenses are
+   irrevocable. Licensors should read and understand the terms
+   and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + + ======================================================================= + + Creative Commons Attribution-NonCommercial 4.0 International Public + License + + By exercising the Licensed Rights (defined below), You accept and agree + to be bound by the terms and conditions of this Creative Commons + Attribution-NonCommercial 4.0 International Public License ("Public + License"). To the extent this Public License may be interpreted as a + contract, You are granted the Licensed Rights in consideration of Your + acceptance of these terms and conditions, and the Licensor grants You + such rights in consideration of benefits the Licensor receives from + making the Licensed Material available under these terms and + conditions. + + Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + d. 
Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. NonCommercial means not primarily intended for or directed towards + commercial advantage or monetary compensation. For purposes of + this Public License, the exchange of the Licensed Material for + other material subject to Copyright and Similar Rights by digital + file-sharing or similar means is NonCommercial provided there is + no payment of monetary compensation in connection with the + exchange. + + j. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + k. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + l. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part, for NonCommercial purposes only; and + + b. produce, reproduce, and Share Adapted Material for + NonCommercial purposes only. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties, including when + the Licensed Material is used other than for NonCommercial + purposes. + + Section 3 -- License Conditions. + + Your exercise of the Licensed Rights is expressly made subject to the + following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. 
For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + Section 4 -- Sui Generis Database Rights. + + Where the Licensed Rights include Sui Generis Database Rights that + apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database for NonCommercial purposes + only; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + + For the avoidance of doubt, this Section 4 supplements and does not + replace Your obligations under this Public License where the Licensed + Rights include other Copyright and Similar Rights. + + Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. 
+ + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + ======================================================================= + + Creative Commons is not a party to its public + licenses. Notwithstanding, Creative Commons may elect to apply one of + its public licenses to material it publishes and in those instances + will be considered the “Licensor.” The text of the Creative Commons + public licenses is dedicated to the public domain under the CC0 Public + Domain Dedication. Except for the limited purpose of indicating that + material is shared under a Creative Commons public license or as + otherwise permitted by the Creative Commons policies published at + creativecommons.org/policies, Creative Commons does not authorize the + use of the trademark "Creative Commons" or any other trademark or logo + of Creative Commons without its prior written consent including, + without limitation, in connection with any unauthorized modifications + to any of its public licenses or any other arrangements, + understandings, or agreements concerning use of licensed material. For + the avoidance of doubt, this paragraph does not form part of the + public licenses. + + Creative Commons may be contacted at creativecommons.org. + + 5. OpenDiT (https://github.com/NUS-HPC-AI-Lab/OpenDiT/blob/master/LICENSE) + + Copyright OpenDiT + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + diff --git a/Open-Sora/.gitattributes b/Open-Sora/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..a6344aac8c09253b3b630fb776ae94478aa0275b --- /dev/null +++ b/Open-Sora/.gitattributes @@ -0,0 +1,35 @@ +*.7z filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.bz2 filter=lfs diff=lfs merge=lfs -text +*.ckpt filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.gz filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.mlmodel filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.npy filter=lfs diff=lfs merge=lfs -text +*.npz filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.parquet filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pickle filter=lfs diff=lfs merge=lfs -text +*.pkl filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text +*.rar filter=lfs diff=lfs merge=lfs -text +*.safetensors filter=lfs diff=lfs merge=lfs -text +saved_model/**/* filter=lfs diff=lfs merge=lfs -text +*.tar.* filter=lfs diff=lfs merge=lfs -text +*.tar filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tgz filter=lfs diff=lfs merge=lfs -text +*.wasm filter=lfs diff=lfs merge=lfs -text +*.xz filter=lfs diff=lfs merge=lfs -text +*.zip filter=lfs diff=lfs merge=lfs -text +*.zst filter=lfs diff=lfs merge=lfs -text +*tfevents* filter=lfs diff=lfs merge=lfs -text diff --git a/Open-Sora/README.md b/Open-Sora/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0ee86cf9097b50fbd4fab297b7ae19ec650cc3da --- /dev/null +++ b/Open-Sora/README.md @@ -0,0 +1,13 @@ +--- +title: Open Sora +emoji: 📚 +colorFrom: yellow +colorTo: indigo +sdk: gradio +sdk_version: 4.21.0 +app_file: app.py +pinned: false +license: apache-2.0 +--- + +Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/Open-Sora/requirements.txt b/Open-Sora/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..dff60d60205f73fd6818c017ee40119680db68b6 --- /dev/null +++ b/Open-Sora/requirements.txt @@ -0,0 +1,12 @@ +torch +torchvision + +packaging +ninja +flash-attn --no-build-isolation + +-v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git + +-U xformers --index-url https://download.pytorch.org/whl/cu121 + +-e git+https://github.com/hpcaitech/Open-Sora.git#egg=open-sora \ No newline at end of file diff --git a/configs/dit/inference/16x256x256.py b/configs/dit/inference/16x256x256.py new file mode 100644 index 
0000000000000000000000000000000000000000..ccb1d796824c0b459b569e44d5ab66543814d748 --- /dev/null +++ b/configs/dit/inference/16x256x256.py @@ -0,0 +1,31 @@ +num_frames = 16 +fps = 8 +image_size = (256, 256) + +# Define model +model = dict( + type="DiT-XL/2", + condition="text", + from_pretrained="PRETRAINED_MODEL", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=4.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/ucf101_labels.txt" +save_dir = "./outputs/samples/" diff --git a/configs/dit/inference/1x256x256-class.py b/configs/dit/inference/1x256x256-class.py new file mode 100644 index 0000000000000000000000000000000000000000..24d1c8af390a408bf3d43ef4cd9c87d18d3fea2b --- /dev/null +++ b/configs/dit/inference/1x256x256-class.py @@ -0,0 +1,31 @@ +num_frames = 1 +fps = 1 +image_size = (256, 256) + +# Define model +model = dict( + type="DiT-XL/2", + no_temporal_pos_emb=True, + condition="label_1000", + from_pretrained="DiT-XL-2-256x256.pt", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="classes", + num_classes=1000, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=4.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/imagenet_id.txt" +save_dir = "./outputs/samples/" diff --git a/configs/dit/inference/1x256x256.py b/configs/dit/inference/1x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..31a5b9f1f2f315b19b528b2c4b98cfeb8b213c58 --- /dev/null +++ b/configs/dit/inference/1x256x256.py @@ -0,0 +1,32 @@ +num_frames = 1 +fps = 1 +image_size = (256, 256) + +# Define model +model = dict( + type="DiT-XL/2", + no_temporal_pos_emb=True, + condition="text", + from_pretrained="PRETRAINED_MODEL", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=4.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/imagenet_labels.txt" +save_dir = "./outputs/samples/" diff --git a/configs/dit/train/16x256x256.py b/configs/dit/train/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..af8ee8768af253ee124e2679706ea4320bb97def --- /dev/null +++ b/configs/dit/train/16x256x256.py @@ -0,0 +1,50 @@ +num_frames = 16 +frame_interval = 3 +image_size = (256, 256) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = False +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="DiT-XL/2", + from_pretrained="DiT-XL-2-256x256.pt", + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load 
= None + +batch_size = 8 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/dit/train/1x256x256.py b/configs/dit/train/1x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..667e0a835652d25c41fbd1d7947e65291972f49c --- /dev/null +++ b/configs/dit/train/1x256x256.py @@ -0,0 +1,50 @@ +num_frames = 1 +frame_interval = 1 +image_size = (256, 256) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = True +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = False +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="DiT-XL/2", + no_temporal_pos_emb=True, + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 128 +lr = 1e-4 # according to DiT repo +grad_clip = 1.0 diff --git a/configs/latte/inference/16x256x256-class.py b/configs/latte/inference/16x256x256-class.py new file mode 100644 index 0000000000000000000000000000000000000000..c46f4bc362f60effbb80c74e4cea3662d39302a1 --- /dev/null +++ b/configs/latte/inference/16x256x256-class.py @@ -0,0 +1,30 @@ +num_frames = 16 +fps = 8 +image_size = (256, 256) + +# Define model +model = dict( + type="Latte-XL/2", + condition="label_101", + from_pretrained="Latte-XL-2-256x256-ucf101.pt", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="classes", + num_classes=101, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=4.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/ucf101_id.txt" +save_dir = "./outputs/samples/" diff --git a/configs/latte/inference/16x256x256.py b/configs/latte/inference/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..cb502371d39b9324084bcda151d0a168e69fafaf --- /dev/null +++ b/configs/latte/inference/16x256x256.py @@ -0,0 +1,31 @@ +num_frames = 16 +fps = 8 +image_size = (256, 256) + +# Define model +model = dict( + type="Latte-XL/2", + condition="text", + from_pretrained="PRETRAINED_MODEL", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=4.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/ucf101_labels.txt" +save_dir = "./outputs/samples/" diff --git a/configs/latte/train/16x256x256.py b/configs/latte/train/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..0bf6bd4126c8517d526c2af1b75d5af8a1660df0 --- /dev/null +++ b/configs/latte/train/16x256x256.py @@ -0,0 +1,49 @@ +num_frames = 16 +frame_interval = 3 +image_size = (256, 256) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="Latte-XL/2", + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + 
type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="clip", + from_pretrained="openai/clip-vit-base-patch32", + model_max_length=77, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 8 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/opensora/inference/16x256x256.py b/configs/opensora/inference/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..5e1a47e55a8540f1e31b055ea5438c57b5b1df88 --- /dev/null +++ b/configs/opensora/inference/16x256x256.py @@ -0,0 +1,34 @@ +num_frames = 16 +fps = 24 // 3 +image_size = (256, 256) + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=0.5, + time_scale=1.0, + enable_flashattn=True, + enable_layernorm_kernel=True, + from_pretrained="PRETRAINED_MODEL", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="iddpm", + num_sampling_steps=100, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2v_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/opensora/inference/16x512x512.py b/configs/opensora/inference/16x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..b64b85c7eda479fb369626856491a90a2c41d081 --- /dev/null +++ b/configs/opensora/inference/16x512x512.py @@ -0,0 +1,35 @@ +num_frames = 16 +fps = 24 // 3 +image_size = (512, 512) + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=1.0, + enable_flashattn=True, + enable_layernorm_kernel=True, + from_pretrained="PRETRAINED_MODEL" +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="iddpm", + num_sampling_steps=100, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2v_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/opensora/inference/64x512x512.py b/configs/opensora/inference/64x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..e15649a35c8205d162cdd6873808e8737f8afb25 --- /dev/null +++ b/configs/opensora/inference/64x512x512.py @@ -0,0 +1,35 @@ +num_frames = 64 +fps = 24 // 2 +image_size = (512, 512) + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=2 / 3, + enable_flashattn=True, + enable_layernorm_kernel=True, + from_pretrained="PRETRAINED_MODEL", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="iddpm", + num_sampling_steps=100, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 1 +seed = 42 +prompt_path = "./assets/texts/t2v_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/opensora/train/16x256x256.py b/configs/opensora/train/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..a64a318f0c72f2786690c2631eb43884898684d8 --- /dev/null +++ 
b/configs/opensora/train/16x256x256.py @@ -0,0 +1,53 @@ +num_frames = 16 +frame_interval = 3 +image_size = (256, 256) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=0.5, + time_scale=1.0, + from_pretrained="PixArt-XL-2-512x512.pth", + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 8 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/opensora/train/16x512x512.py b/configs/opensora/train/16x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..885aad1fed966acddfa9ce609c65b24449cc9c05 --- /dev/null +++ b/configs/opensora/train/16x512x512.py @@ -0,0 +1,54 @@ +num_frames = 16 +frame_interval = 3 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = False +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=1.0, + from_pretrained=None, + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 500 +load = None + +batch_size = 8 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/opensora/train/360x512x512.py b/configs/opensora/train/360x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..7a6f75995b96152a80ad14e6a40f4b1e2482c1e9 --- /dev/null +++ b/configs/opensora/train/360x512x512.py @@ -0,0 +1,55 @@ +num_frames = 360 +frame_interval = 1 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2-seq" +sp_size = 2 + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=2 / 3, + from_pretrained=None, + enable_flashattn=True, + enable_layernorm_kernel=True, + enable_sequence_parallelism=True, # enable sq here +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 250 +load = None + +batch_size = 1 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/opensora/train/64x512x512-sp.py b/configs/opensora/train/64x512x512-sp.py new file mode 100644 index 
0000000000000000000000000000000000000000..b0b9062c987e7e90c75e5e1d2064fe8654e22b46 --- /dev/null +++ b/configs/opensora/train/64x512x512-sp.py @@ -0,0 +1,54 @@ +num_frames = 64 +frame_interval = 2 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2-seq" +sp_size = 2 + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=2 / 3, + from_pretrained=None, + enable_flashattn=True, + enable_layernorm_kernel=True, + enable_sequence_parallelism=True, # enable sq here +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 1 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/opensora/train/64x512x512.py b/configs/opensora/train/64x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..dfcdcc08d250e0a1d23ece174c023975309d2ae1 --- /dev/null +++ b/configs/opensora/train/64x512x512.py @@ -0,0 +1,54 @@ +num_frames = 64 +frame_interval = 2 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=2 / 3, + from_pretrained=None, + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=64, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 250 +load = None + +batch_size = 4 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/pixart/inference/16x256x256.py b/configs/pixart/inference/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc8ee653c0fa23b76c29f17e728e263c738c2ea --- /dev/null +++ b/configs/pixart/inference/16x256x256.py @@ -0,0 +1,32 @@ +num_frames = 16 +fps = 8 +image_size = (256, 256) + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=0.5, + time_scale=1.0, + from_pretrained="outputs/098-F16S3-PixArt-XL-2/epoch7-global_step30000/model_ckpt.pt", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2v_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/pixart/inference/1x1024MS.py b/configs/pixart/inference/1x1024MS.py new file mode 100644 index 0000000000000000000000000000000000000000..41cc97ad0402d54610302ff6c10a7a4630d1f15b --- /dev/null +++ b/configs/pixart/inference/1x1024MS.py @@ -0,0 +1,34 @@ +num_frames = 1 +fps = 1 +image_size = 
(1920, 512) +multi_resolution = True + +# Define model +model = dict( + type="PixArtMS-XL/2", + space_scale=2.0, + time_scale=1.0, + no_temporal_pos_emb=True, + from_pretrained="PixArt-XL-2-1024-MS.pth", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2i_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/pixart/inference/1x256x256.py b/configs/pixart/inference/1x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..11e06d777af8450b6610e4e99f29e10c548900c1 --- /dev/null +++ b/configs/pixart/inference/1x256x256.py @@ -0,0 +1,33 @@ +num_frames = 1 +fps = 1 +image_size = (256, 256) + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=1.0, + time_scale=1.0, + no_temporal_pos_emb=True, + from_pretrained="PixArt-XL-2-256x256.pth", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2i_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/pixart/inference/1x512x512.py b/configs/pixart/inference/1x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..5674259b5a36afc48384b8170fcb978a43717753 --- /dev/null +++ b/configs/pixart/inference/1x512x512.py @@ -0,0 +1,33 @@ +num_frames = 1 +fps = 1 +image_size = (512, 512) + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=1.0, + time_scale=1.0, + no_temporal_pos_emb=True, + from_pretrained="PixArt-XL-2-512x512.pth", +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, +) +scheduler = dict( + type="dpm-solver", + num_sampling_steps=20, + cfg_scale=7.0, +) +dtype = "fp16" + +# Others +batch_size = 2 +seed = 42 +prompt_path = "./assets/texts/t2i_samples.txt" +save_dir = "./outputs/samples/" diff --git a/configs/pixart/train/16x256x256.py b/configs/pixart/train/16x256x256.py new file mode 100644 index 0000000000000000000000000000000000000000..b47731e2d5fcb1418c23b68442ee1cae54425726 --- /dev/null +++ b/configs/pixart/train/16x256x256.py @@ -0,0 +1,53 @@ +num_frames = 16 +frame_interval = 3 +image_size = (256, 256) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = False +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=0.5, + time_scale=1.0, + from_pretrained="PixArt-XL-2-512x512.pth", + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 
1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 8 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/pixart/train/1x512x512.py b/configs/pixart/train/1x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..619c9aafd03a68a36815b5bbc7d12d59c3ea40c6 --- /dev/null +++ b/configs/pixart/train/1x512x512.py @@ -0,0 +1,54 @@ +num_frames = 1 +frame_interval = 1 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = True +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=1.0, + time_scale=1.0, + no_temporal_pos_emb=True, + from_pretrained="PixArt-XL-2-512x512.pth", + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 1000 +load = None + +batch_size = 32 +lr = 2e-5 +grad_clip = 1.0 diff --git a/configs/pixart/train/64x512x512.py b/configs/pixart/train/64x512x512.py new file mode 100644 index 0000000000000000000000000000000000000000..628cf254fe3d379e4fe6661d62ddad6511003abc --- /dev/null +++ b/configs/pixart/train/64x512x512.py @@ -0,0 +1,54 @@ +num_frames = 64 +frame_interval = 2 +image_size = (512, 512) + +# Define dataset +root = None +data_path = "CSV_PATH" +use_image_transform = False +num_workers = 4 + +# Define acceleration +dtype = "bf16" +grad_checkpoint = True +plugin = "zero2" +sp_size = 1 + +# Define model +model = dict( + type="PixArt-XL/2", + space_scale=1.0, + time_scale=2 / 3, + from_pretrained=None, + enable_flashattn=True, + enable_layernorm_kernel=True, +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, +) +scheduler = dict( + type="iddpm", + timestep_respacing="", +) + +# Others +seed = 42 +outputs = "outputs" +wandb = False + +epochs = 1000 +log_every = 10 +ckpt_every = 250 +load = None + +batch_size = 4 +lr = 2e-5 +grad_clip = 1.0 diff --git a/docs/README_zh.md b/docs/README_zh.md new file mode 100644 index 0000000000000000000000000000000000000000..f98a411f20ecee9d278b23525b27232a040341b9 --- /dev/null +++ b/docs/README_zh.md @@ -0,0 +1,206 @@ +

+ +## Open-Sora: A Fully Open-Source, Efficient Reproduction Scheme for Sora-like Video Generation +The **Open-Sora** project is an initiative dedicated to **efficiently** producing high-quality videos and making its models, tools, and content available to everyone. +By embracing **open-source** principles, Open-Sora not only democratizes access to advanced video generation techniques at low cost, but also offers a streamlined, user-friendly pipeline that simplifies the complexity of video production. +With Open-Sora, we hope more developers will join us in exploring innovation, creativity, and inclusivity in the field of content creation. + [[English]](/README.md) + +## 📰 News + +* **[2024.03.18]** 🔥 We released **Open-Sora 1.0**, a fully open-source video generation project. +* Open-Sora 1.0 supports the full pipeline of video data preprocessing, accelerated training, inference, and more. +* Our provided [model weights](#model-weights) can generate 2~5s 512x512 videos with only 3 days of training. +* **[2024.03.04]** Open-Sora: an open-source Sora reproduction scheme, cutting cost by 46% and extending the sequence length to nearly one million. + +## 🎥 Latest Demos + +| **2s 512×512** | **2s 512×512** | **2s 512×512** | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| [](https://github.com/hpcaitech/Open-Sora/assets/99191637/de1963d3-b43b-4e68-a670-bb821ebb6f80) | [](https://github.com/hpcaitech/Open-Sora/assets/99191637/13f8338f-3d42-4b71-8142-d234fbd746cc) | [](https://github.com/hpcaitech/Open-Sora/assets/99191637/fa6a65a6-e32a-4d64-9a9e-eabb0ebb8c16) | +| A serene night scene in a forested area. [...] The video is a time-lapse, capturing the transition from day to night, with the lake and forest serving as a constant backdrop. | A soaring drone footage captures the majestic beauty of a coastal cliff, [...] The water gently laps at the rock base and the greenery that clings to the top of the cliff. | The majestic beauty of a waterfall cascading down a cliff into a serene lake. [...] The camera angle provides a bird's eye view of the waterfall. | +| [](https://github.com/hpcaitech/Open-Sora/assets/99191637/64232f84-1b36-4750-a6c0-3e610fa9aa94) | [](https://github.com/hpcaitech/Open-Sora/assets/99191637/983a1965-a374-41a7-a76b-c07941a6c1e9) | [](https://github.com/hpcaitech/Open-Sora/assets/99191637/ec10c879-9767-4c31-865f-2e8d6cf11e65) | +| A bustling city street at night, filled with the glow of car headlights and the ambient light of streetlights. [...] | The vibrant beauty of a sunflower field. The sunflowers are arranged in neat rows, creating a sense of order and symmetry. [...] | A serene underwater scene featuring a sea turtle swimming through a coral reef. The turtle, with its greenish-brown shell [...]
| + +Videos are downsampled to `.gif` for display. Click for the original videos. Prompts are trimmed for display; see the full prompts [here](/assets/texts/t2v_samples.txt). See more samples in our [gallery](https://hpcaitech.github.io/Open-Sora/). + +## 🔆 New Features + +* 📍 Open-Sora v1 is released. Model weights are available [here](#model-weights). With only 400K video clips and 200 H800 days (compare with 152M samples for Stable Video Diffusion), we are able to generate 2s 512×512 videos. +* ✅ Three-stage training from an image diffusion model to a video diffusion model. We provide the weights for each stage. +* ✅ Support for training acceleration, including accelerated transformers, faster T5 and VAE, and sequence parallelism. Open-Sora improves training speed by **55%** when training on 64x512x512 videos. See [acceleration](docs/acceleration.md) for details. +* ✅ We provide video cutting and captioning tools for data preprocessing. Instructions are [here](tools/data/README.md), and our data collection plan is in [datasets](docs/datasets.md). +* ✅ We find the VQ-VAE from [VideoGPT](https://wilson1yan.github.io/videogpt/index.html) of low quality, so we adopt a better VAE from [Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original). We also find patchifying in the temporal dimension degrades the quality. See our **[report](docs/report_v1.md)** for more discussion. +* ✅ We investigate different architectures, including DiT, Latte, and our proposed **STDiT**. Our STDiT achieves a better trade-off between quality and speed. See our **[report](docs/report_v1.md)** for more discussion. +* ✅ Support for CLIP and T5 text conditioning. +* ✅ By treating images as one-frame videos, our project supports training DiT on both images and videos (e.g. ImageNet and UCF101). See [commands](docs/commands.md) for more instructions. +* ✅ Support inference with official weights from [DiT](https://github.com/facebookresearch/DiT), [Latte](https://github.com/Vchitect/Latte), and [PixArt](https://pixart-alpha.github.io/). + +
+View more + +* ✅ Refactored the codebase. See [structure](docs/structure.md) for the project structure and how to use the config files. + +
+ +### Next Steps [Sorted by Priority] + +* [ ] Complete the data processing pipeline (including dense optical flow, aesthetic scoring, text-image similarity, deduplication, etc.). See [datasets](/docs/datasets.md) for more information. **[WIP]** +* [ ] Train a Video-VAE. **[WIP]** + +
+View more + +* [ ] Support image and video conditioning. +* [ ] Evaluation pipeline. +* [ ] Incorporate a better scheduler, such as the rectified flow scheduler in SD3. +* [ ] Support variable aspect ratios, resolutions, and durations. +* [ ] Support SD3 upon its release. + +
+ +## Table of Contents + +* [Installation](#installation) +* [Model Weights](/#model-weights) +* [Inference](/#inference) +* [Data Processing](/#data-processing) +* [Training](/#training) +* [Contribution](/#contribution) +* [Acknowledgement](/#acknowledgement) +* [Citation](/#citation) + +## Installation + +```bash +# create a virtual env +conda create -n opensora python=3.10 + +# install torch +# the command below is for CUDA 12.1, choose install commands from +# https://pytorch.org/get-started/locally/ based on your own CUDA version +pip3 install torch torchvision + +# install flash attention (optional) +pip install packaging ninja +pip install flash-attn --no-build-isolation + +# install apex (optional) +pip install -v --disable-pip-version-check --no-cache-dir --no-build-isolation --config-settings "--build-option=--cpp_ext" --config-settings "--build-option=--cuda_ext" git+https://github.com/NVIDIA/apex.git + +# install xformers +pip3 install -U xformers --index-url https://download.pytorch.org/whl/cu121 + +# install this project +git clone https://github.com/hpcaitech/Open-Sora +cd Open-Sora +pip install -v . +``` + +After installation, we recommend reading [structure](docs/structure.md) to learn the project structure and how to use the config files. + +## Model Weights + +| Resolution | Data | Iterations | Batch Size | GPU Days (H800) | URL | +| ---------- | ------ | ----------- | ---------- | --------------- | ---------- | +| 16×256×256 | 366K | 80k | 8×64 | 117 | [:link:]() | +| 16×256×256 | 20K HQ | 24k | 8×64 | 45 | [:link:]() | +| 16×512×512 | 20K HQ | 20k | 2×64 | 35 | [:link:]() | +| 64×512×512 | 50K HQ | | | | TBD | + +Our model's weights are partially initialized from [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha). The number of parameters is 724M. See our **[report](/docs/report_v1.md)** for more information about training, and [datasets](/docs/datasets.md) for more information about the data. HQ means high quality. +:warning: **LIMITATION**: Our model is trained on a limited budget. The quality and text alignment are relatively poor. The model performs especially badly at generating humans and cannot follow detailed instructions. We are working on improving the quality and text alignment. + +## Inference + +To run inference with our provided weights, first download the [T5](https://huggingface.co/DeepFloyd/t5-v1_1-xxl/tree/main) weights into `pretrained_models/t5_ckpts/t5-v1_1-xxl`. Then download the model weights and run the following commands to generate samples. See [here](docs/structure.md#inference-config-demos) to customize the configuration. + +```bash +# Sample 16x256x256 (5s/sample) +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path ./path/to/your/ckpt.pth + +# Sample 16x512x512 (20s/sample, 100 time steps) +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x512x512.py --ckpt-path ./path/to/your/ckpt.pth + +# Sample 64x512x512 (40s/sample, 100 time steps) +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth + +# Sample 64x512x512 with sequence parallelism (30s/sample, 100 time steps) +# sequence parallelism is enabled automatically when nproc_per_node is larger than 1 +torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/64x512x512.py --ckpt-path ./path/to/your/ckpt.pth +``` + +Speed is tested on H800 GPUs. For inference with other models, see [here](docs/commands.md) for more instructions. + +## Data Processing + +High-quality data is the key to high-quality models. [Here](/docs/datasets.md) are the datasets we used and our data collection plan. We provide tools to process video data. Currently, our data processing pipeline includes the following steps: + +1. Downloading the datasets. [[docs](/tools/datasets/README.md)] +2. Splitting videos into clips. [[docs](/tools/scenedetect/README.md)] +3.
Generating video captions. [[docs](/tools/caption/README.md)] + +## Training + +To launch training, first download the [T5](https://huggingface.co/DeepFloyd/t5-v1_1-xxl/tree/main) weights into `pretrained_models/t5_ckpts/t5-v1_1-xxl`. Then run the following commands to launch training on a single node. + +```bash +# 1 GPU, 16x256x256 +torchrun --nnodes=1 --nproc_per_node=1 scripts/train.py configs/opensora/train/16x256x256.py --data-path YOUR_CSV_PATH +# 8 GPUs, 64x512x512 +torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT +``` + +To launch training on multiple nodes, prepare a hostfile according to [ColossalAI](https://colossalai.org/docs/basics/launch_colossalai/#launch-with-colossal-ai-cli), and run the following command. + +```bash +colossalai run --nproc_per_node 8 --hostfile hostfile scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --ckpt-path YOUR_PRETRAINED_CKPT +``` + +For training other models and advanced usage, see [here](docs/commands.md) for more instructions. + +## Contribution + +If you wish to contribute to this project, please refer to the [Contribution Guide](./CONTRIBUTING.md). + +## Acknowledgement + +* [DiT](https://github.com/facebookresearch/DiT): Scalable Diffusion Models with Transformers. +* [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT): An acceleration for DiT training. We adopt valuable acceleration strategies for training progress from OpenDiT. +* [PixArt](https://github.com/PixArt-alpha/PixArt-alpha): An open-source DiT-based text-to-image model. +* [Latte](https://github.com/Vchitect/Latte): An attempt to efficiently train DiT for video. +* [StabilityAI VAE](https://huggingface.co/stabilityai/sd-vae-ft-mse-original): A powerful image VAE model. +* [CLIP](https://github.com/openai/CLIP): A powerful text-image embedding model. +* [T5](https://github.com/google-research/text-to-text-transfer-transformer): A powerful text encoder. +* [LLaVA](https://github.com/haotian-liu/LLaVA): A powerful image captioning model based on [Yi-34B](https://huggingface.co/01-ai/Yi-34B). + +We are grateful for their exceptional work and generous contributions to open source. + +## Citation + +```bibtex +@software{opensora, + author = {Zangwei Zheng and Xiangyu Peng and Yang You}, + title = {Open-Sora: Democratizing Efficient Video Production for All}, + month = {March}, + year = {2024}, + url = {https://github.com/hpcaitech/Open-Sora} +} +``` + +[Zangwei Zheng](https://github.com/zhengzangw) and [Xiangyu Peng](https://github.com/xyupeng) equally contributed to this work during their internship at [HPC-AI Tech](https://hpc-ai.com/). + +## Star History + +[![Star History Chart](https://api.star-history.com/svg?repos=hpcaitech/Open-Sora&type=Date)](https://star-history.com/#hpcaitech/Open-Sora&Date) diff --git a/docs/acceleration.md b/docs/acceleration.md new file mode 100644 index 0000000000000000000000000000000000000000..3a0a68eb7095c8d2d668ffe8edabc6a3dfc5628d --- /dev/null +++ b/docs/acceleration.md @@ -0,0 +1,57 @@ +# Acceleration + +Open-Sora aims to provide a high-speed training framework for diffusion models. We can achieve a **55%** training speed acceleration when training on **64 frames 512x512 videos**. Our framework supports training on **1min 1080p videos**. + +## Accelerated Transformer + +Open-Sora boosts the training speed by: + +- Kernel optimization, including [flash attention](https://github.com/Dao-AILab/flash-attention), a fused layernorm kernel, and kernels compiled by ColossalAI. +- Hybrid parallelism, including ZeRO. +- Gradient checkpointing for a larger batch size. + +Our training speed on images is comparable to [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT), a project to accelerate DiT training. The training speed is measured on 8 H800 GPUs with batch size 128, image size 256x256.
+ +| Model | Throughput (img/s/GPU) | Throughput (tokens/s/GPU) | +| -------- | ---------------------- | ------------------------- | +| DiT | 100 | 26k | +| OpenDiT | 175 | 45k | +| OpenSora | 175 | 45k | + +## Efficient STDiT + +Our STDiT adopts spatial-temporal attention to model the video data. Compared with directly applying full attention as in DiT, our STDiT is more efficient as the number of frames increases. Our current framework only supports sequence parallelism for very long sequences. + +The training speed is measured on 8 H800 GPUs with acceleration techniques applied; GC means gradient checkpointing. Both models are trained with T5 conditioning, as in PixArt. + +| Model | Setting | Throughput (sample/s/GPU) | Throughput (tokens/s/GPU) | +| ---------------- | -------------- | ------------------------- | ------------------------- | +| DiT | 16x256 (4k) | 7.20 | 29k | +| STDiT | 16x256 (4k) | 7.00 | 28k | +| DiT | 16x512 (16k) | 0.85 | 14k | +| STDiT | 16x512 (16k) | 1.45 | 23k | +| DiT (GC) | 64x512 (65k) | 0.08 | 5k | +| STDiT (GC) | 64x512 (65k) | 0.40 | 25k | +| STDiT (GC, sp=2) | 360x512 (370k) | 0.10 | 18k | + +With a 4x downsampling in the temporal dimension by a Video-VAE, a 24fps video has 450 frames. The gap between the speed of STDiT (28k tokens/s) and DiT on images (up to 45k tokens/s) mainly comes from the T5 and VAE encoding, and the temporal attention. + +## Accelerated Encoder (T5, VAE) + +During training, texts are encoded by T5 and videos are encoded by the VAE. Typically there are two ways to accelerate training: + +1. Preprocess text and video data in advance and save them to disk. +2. Encode text and video data during training, and accelerate the encoding process. + +For option 1, 120 tokens for one sample require 1 MB of disk space, and a 64x64x64 latent requires 4 MB. Considering a training dataset with 10M video clips, the total disk space required is 50 TB. Our storage system is not ready at this time for this scale of data. + +For option 2, we boost T5's speed and reduce its memory requirement following [OpenDiT](https://github.com/NUS-HPC-AI-Lab/OpenDiT). We also find that the VAE consumes a large amount of GPU memory, so we split each batch into smaller micro batches for VAE encoding. With both techniques, we can greatly accelerate training. + +The training speed is measured on 8 H800 GPUs with STDiT. + +| Acceleration | Setting | Throughput (img/s/GPU) | Throughput (tokens/s/GPU) | +| ------------ | ------------- | ---------------------- | ------------------------- | +| Baseline | 16x256 (4k) | 6.16 | 25k | +| w. faster T5 | 16x256 (4k) | 7.00 | 29k | +| Baseline | 64x512 (65k) | 0.94 | 15k | +| w. both | 64x512 (65k) | 1.45 | 23k | diff --git a/docs/commands.md b/docs/commands.md new file mode 100644 index 0000000000000000000000000000000000000000..28ee285de143c9d5baf56d513728249f7aa82730 --- /dev/null +++ b/docs/commands.md @@ -0,0 +1,91 @@ +# Commands + +## Inference + +You can modify the corresponding config files to change the inference settings. See more details [here](/docs/structure.md#inference-config-demos). + +### Inference with DiT pretrained on ImageNet + +The following command automatically downloads the pretrained weights on ImageNet and runs inference. + +```bash +python scripts/inference.py configs/dit/inference/1x256x256-class.py --ckpt-path DiT-XL-2-256x256.pt +``` + +### Inference with Latte pretrained on UCF101 + +The following command automatically downloads the pretrained weights on UCF101 and runs inference.
+ +```bash +python scripts/inference.py configs/latte/inference/16x256x256-class.py --ckpt-path Latte-XL-2-256x256-ucf101.pt +``` + +### Inference with PixArt-α pretrained weights + +Download T5 into `./pretrained_models` and run the following command. + +```bash +# 256x256 +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x256x256.py --ckpt-path PixArt-XL-2-256x256.pth + +# 512x512 +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x512x512.py --ckpt-path PixArt-XL-2-512x512.pth + +# 1024 multi-scale +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/pixart/inference/1x1024MS.py --ckpt-path PixArt-XL-2-1024MS.pth +``` + +### Inference with checkpoints saved during training + +During training, an experiment logging folder is created in the `outputs` directory. Under each checkpoint folder, e.g. `epoch12-global_step2000`, there are an `ema.pt` file and the shared `model` folder. Run the following commands to perform inference. + +```bash +# inference with ema model +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000/ema.pt + +# inference with model +torchrun --standalone --nproc_per_node 1 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000 + +# inference with sequence parallelism +# sequence parallelism is enabled automatically when nproc_per_node is larger than 1 +torchrun --standalone --nproc_per_node 2 scripts/inference.py configs/opensora/inference/16x256x256.py --ckpt-path outputs/001-STDiT-XL-2/epoch12-global_step2000 +``` + +The second command will automatically generate a `model_ckpt.pt` file in the checkpoint folder. + +### Inference Hyperparameters + +1. DPM-solver is good at fast inference for images. However, its video results are not satisfactory. You can use it for fast demo purposes. + +```python +type="dpm-solver" +num_sampling_steps=20 +``` + +2. You can use [SVD](https://huggingface.co/stabilityai/stable-video-diffusion-img2vid-xt)'s finetuned VAE decoder on videos for inference (consumes more memory). However, we do not see significant improvement in the video results. To use it, download [the pretrained weights](https://huggingface.co/maxin-cn/Latte/tree/main/t2v_required_models/vae_temporal_decoder) into `./pretrained_models/vae_temporal_decoder` and modify the config file as follows. + +```python +vae = dict( + type="VideoAutoencoderKLTemporalDecoder", + from_pretrained="pretrained_models/vae_temporal_decoder", +) +``` + +## Training + +To resume training, run the following command. `--load` is different from `--ckpt-path`, as it also loads the optimizer and dataloader states. + +```bash +torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --load YOUR_PRETRAINED_CKPT +``` + +To enable wandb logging, add `--wandb` to the command. + +```bash +WANDB_API_KEY=YOUR_WANDB_API_KEY torchrun --nnodes=1 --nproc_per_node=8 scripts/train.py configs/opensora/train/64x512x512.py --data-path YOUR_CSV_PATH --wandb True +``` + +You can modify the corresponding config files to change the training settings. See more details [here](/docs/structure.md#training-config-demos). + +### Training Hyperparameters + +1. `dtype` is the data type for training. Only `fp16` and `bf16` are supported. ColossalAI automatically enables mixed precision training for `fp16` and `bf16`.
During training, we find `bf16` more stable. diff --git a/docs/datasets.md b/docs/datasets.md new file mode 100644 index 0000000000000000000000000000000000000000..c06835b03c67126dc5c1f11062340dcff60d9a21 --- /dev/null +++ b/docs/datasets.md @@ -0,0 +1,28 @@ +# Datasets + +## Datasets used for now + +### HD-VG-130M + +[HD-VG-130M](https://github.com/daooshee/HD-VG-130M?tab=readme-ov-file) comprises 130M text-video pairs. The captions are generated by BLIP-2. We find the clip cuts and the caption quality relatively poor. It contains 20 splits; for Open-Sora 1.0, we use the first split. We plan to use the whole dataset and re-process it. + +### Inter4k + +[Inter4k](https://github.com/alexandrosstergiou/Inter4K) is a dataset containing 1k video clips with 4K resolution. The dataset was proposed for super-resolution tasks. We use the dataset for HQ training. The videos are processed as mentioned [here](/README.md#data-processing). + +### Pexels.com + +[Pexels.com](https://www.pexels.com/) is a website that provides free stock photos and videos. We collect 19K video clips from this website for HQ training. The videos are processed as mentioned [here](/README.md#data-processing). + +## Datasets watching list + +We are also watching the following datasets and considering using them in the future, depending on our disk space and the quality of each dataset. + +| Name | Size | Description | +| ----------------- | ------------ | ----------------------------- | +| Panda-70M | 70M videos | High quality video-text pairs | +| WebVid-10M | 10M videos | Low quality | +| InternVid-10M-FLT | 10M videos | | +| EGO4D | 3670 hours | | +| OpenDV-YouTube | 1700 hours | | +| VidProM | 6.69M videos | | diff --git a/docs/report_v1.md b/docs/report_v1.md new file mode 100644 index 0000000000000000000000000000000000000000..b3b8073cb2aa5fd257664c6947b9187683c35e6a --- /dev/null +++ b/docs/report_v1.md @@ -0,0 +1,47 @@ +# Open-Sora v1 Report + +OpenAI's Sora is amazing at generating one-minute high-quality videos. However, it reveals almost no information about its technical details. To make AI more "open", we are dedicated to building an open-source version of Sora. This report describes our first attempt to train a transformer-based video diffusion model. + +## Efficiency in choosing the architecture + +To lower the computational cost, we want to utilize existing VAE models. Sora uses a spatial-temporal VAE to reduce the temporal dimensions. However, we found that there is no open-source high-quality spatial-temporal VAE model. [MAGVIT](https://github.com/google-research/magvit)'s 4x4x4 VAE is not open-sourced, while [VideoGPT](https://wilson1yan.github.io/videogpt/index.html)'s 2x4x4 VAE has low quality in our experiments. Thus, we decided to use a 2D VAE (from [Stability-AI](https://huggingface.co/stabilityai/sd-vae-ft-mse-original)) in our first version. + +Video training involves a large number of tokens. Considering 24fps 1min videos, we have 1440 frames. With VAE downsampling 4x and patch size downsampling 2x, we have 1440x1024≈1.5M tokens. Full attention on 1.5M tokens leads to a huge computational cost. Thus, we use spatial-temporal attention to reduce the cost, following [Latte](https://github.com/Vchitect/Latte). + +
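To get a feel for the savings, here is a back-of-the-envelope comparison of pairwise token interactions per attention layer, using the token counts above (illustrative arithmetic only, not a measured benchmark from the Open-Sora codebase): + +```python +# Pairwise token interactions per layer, using the numbers above: +# T latent frames and S spatial tokens per frame (1440 x 1024 = ~1.5M tokens). +T, S = 1440, 1024 +full = (T * S) ** 2  # full attention over all T*S tokens +st = T * S**2 + S * T**2  # spatial attention per frame + temporal attention per location +print(f"full: {full:.2e}, spatial-temporal: {st:.2e}, saving: {full / st:.0f}x")  # ~600x +``` + +Factorizing attention this way trades one quadratic cost in T*S for two much smaller quadratic terms, which is what makes long-video training tractable. + +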
As shown in the figure, we insert a temporal attention right after each spatial attention in STDiT (ST stands for spatial-temporal). This is similar to variant 3 in Latte's paper, though we do not control for a similar number of parameters across these variants. While Latte's paper claims their variant is better than variant 3, our experiments on 16x256x256 videos show that, with the same number of iterations, the performance ranks as: DiT (full) > STDiT (Sequential) > STDiT (Parallel) ≈ Latte. Thus, we choose STDiT (Sequential) for efficiency. A speed benchmark is provided [here](/docs/acceleration.md#efficient-stdit). + +![Architecture Comparison](https://i0.imgs.ovh/2024/03/15/eLk9D.png) + +To focus on video generation, we hope to train the model based on a powerful image generation model. [PixArt-α](https://github.com/PixArt-alpha/PixArt-alpha) is an efficiently trained high-quality image generation model with a T5-conditioned DiT structure. We initialize our model with PixArt-α and initialize the projection layer of each inserted temporal attention with zero. This initialization preserves the model's image generation ability at the beginning, which Latte's architecture cannot (a minimal sketch of this zero-initialized insertion is given just before the loss curves below). The inserted attention increases the number of parameters from 580M to 724M. + +![Architecture](https://i0.imgs.ovh/2024/03/16/erC1d.png) + +Drawing from the success of PixArt-α and Stable Video Diffusion, we also adopt a progressive training strategy: 16x256x256 on the 366K-clip pretraining dataset, and then 16x256x256, 16x512x512, and 64x512x512 on the 20K HQ dataset. With scaled position embeddings, this strategy greatly reduces the computational cost. + +We also tried to use a 3D patch embedder in DiT. However, with 2x downsampling on the temporal dimension, the generated videos had low quality. Thus, we leave the downsampling to a temporal VAE in our next version. For now, we sample every 3rd frame for 16-frame training and every 2nd frame for 64-frame training. + +## Data is the key to high quality + +We find that the number and quality of data have a great impact on the quality of the generated videos, even larger than the model architecture and training strategy. At this time, we only prepared the first split (366K video clips) from [HD-VG-130M](https://github.com/daooshee/HD-VG-130M). The quality of these videos varies greatly, and the captions are not that accurate. Thus, we further collected 20K relatively high-quality videos from [Pexels](https://www.pexels.com/), which provides free-license videos. We caption the videos with LLaVA, an image captioning model, using three frames and a carefully designed prompt. With this prompt, LLaVA can generate good-quality captions. + +![Caption](https://i0.imgs.ovh/2024/03/16/eXdvC.png) + +As we lay more emphasis on data quality, we plan to collect more data and build a video preprocessing pipeline in our next version. + +## Training Details + +With a limited training budget, we made only a few explorations. We found a learning rate of 1e-4 too large and scaled it down to 2e-5. When training with a large batch size, we found `fp16` less stable than `bf16`, sometimes leading to generation failure. Thus, we switched to `bf16` for training on 64x512x512. For other hyper-parameters, we follow previous works.
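+ +To make the zero-initialized temporal insertion above concrete, here is a minimal PyTorch sketch of one sequential spatial-temporal block. It uses `torch.nn.MultiheadAttention` as a stand-in for the actual attention layers; the class and argument names are illustrative, not the real Open-Sora implementation (see `opensora/models/stdit` for that). + +```python +import torch +import torch.nn as nn + + +class STDiTBlockSketch(nn.Module): +    """Sequential spatial-temporal block: spatial attention within each frame, +    then temporal attention across frames at each spatial location.""" + +    def __init__(self, hidden_size: int, num_heads: int): +        super().__init__() +        self.spatial_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True) +        self.temporal_attn = nn.MultiheadAttention(hidden_size, num_heads, batch_first=True) +        # Zero-init the temporal output projection so that, at initialization, +        # the block reduces to the pretrained image (PixArt-alpha) block. +        nn.init.zeros_(self.temporal_attn.out_proj.weight) +        nn.init.zeros_(self.temporal_attn.out_proj.bias) + +    def forward(self, x: torch.Tensor, t: int, s: int) -> torch.Tensor: +        # x: (B, t*s, C) with t frames and s spatial tokens per frame (frame-major order) +        b, _, c = x.shape +        xs = x.reshape(b * t, s, c)  # group tokens by frame +        x = x + self.spatial_attn(xs, xs, xs, need_weights=False)[0].reshape(b, t * s, c) +        xt = x.reshape(b, t, s, c).transpose(1, 2).reshape(b * s, t, c)  # group tokens by location +        out = self.temporal_attn(xt, xt, xt, need_weights=False)[0] +        return x + out.reshape(b, s, t, c).transpose(1, 2).reshape(b, t * s, c) +``` + +Because the temporal branch outputs zeros at initialization, a stack of such blocks initially behaves exactly like the pretrained spatial model, and temporal mixing is learned gradually during training.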
+ +## Loss curves + +16x256x256 Pretraining Loss Curve + +![16x256x256 Pretraining Loss Curve](https://i0.imgs.ovh/2024/03/16/erXQj.png) + +16x256x256 HQ Training Loss Curve + +![16x256x256 HQ Training Loss Curve](https://i0.imgs.ovh/2024/03/16/ernXv.png) + +16x512x512 HQ Training Loss Curve + +![16x512x512 HQ Training Loss Curve](https://i0.imgs.ovh/2024/03/16/erHBe.png) diff --git a/docs/structure.md b/docs/structure.md new file mode 100644 index 0000000000000000000000000000000000000000..0fc087ee45b7d82c2009c4dae198fa91b9896d1a --- /dev/null +++ b/docs/structure.md @@ -0,0 +1,178 @@ +# Repo & Config Structure + +## Repo Structure + +```plaintext +Open-Sora +├── README.md +├── docs +│ ├── acceleration.md -> Acceleration & Speed benchmark +│ ├── commands.md -> Commands for training & inference +│ ├── datasets.md -> Datasets used in this project +│ ├── structure.md -> This file +│ └── report_v1.md -> Report for Open-Sora v1 +├── scripts +│ ├── train.py -> diffusion training script +│ └── inference.py -> diffusion inference script +├── configs -> Configs for training & inference +├── opensora +│ ├── __init__.py +│ ├── registry.py -> Registry helper +│ ├── acceleration -> Acceleration related code +│ ├── dataset -> Dataset related code +│ ├── models +│ │ ├── layers -> Common layers +│ │ ├── vae -> VAE as image encoder +│ │ ├── text_encoder -> Text encoder +│ │ │ ├── classes.py -> Class id encoder (inference only) +│ │ │ ├── clip.py -> CLIP encoder +│ │ │ └── t5.py -> T5 encoder +│ │ ├── dit +│ │ ├── latte +│ │ ├── pixart +│ │ └── stdit -> Our STDiT related code +│ ├── schedulers -> Diffusion schedulers +│ │ ├── iddpm -> IDDPM for training and inference +│ │ └── dpms -> DPM-Solver for fast inference +│ └── utils +└── tools -> Tools for data processing and more +``` + +## Configs + +Our config files follow [MMEngine](https://github.com/open-mmlab/mmengine). MMEngine reads a config file (a `.py` file) and parses it into a dictionary-like object. + +```plaintext +Open-Sora +└── configs -> Configs for training & inference + ├── opensora -> STDiT related configs + │ ├── inference + │ │ ├── 16x256x256.py -> Sample videos 16 frames 256x256 + │ │ ├── 16x512x512.py -> Sample videos 16 frames 512x512 + │ │ └── 64x512x512.py -> Sample videos 64 frames 512x512 + │ └── train + │ ├── 16x256x256.py -> Train on videos 16 frames 256x256 + │ ├── 16x512x512.py -> Train on videos 16 frames 512x512 + │ └── 64x512x512.py -> Train on videos 64 frames 512x512 + ├── dit -> DiT related configs + │ ├── inference + │ │ ├── 1x256x256-class.py -> Sample images with ckpts from DiT + │ │ ├── 1x256x256.py -> Sample images with clip condition + │ │ └── 16x256x256.py -> Sample videos + │ └── train + │ ├── 1x256x256.py -> Train on images with clip condition + │ └── 16x256x256.py -> Train on videos + ├── latte -> Latte related configs + └── pixart -> PixArt related configs +``` + +## Inference config demos + +To change the inference settings, you can directly modify the corresponding config file, or you can pass arguments to overwrite it ([config_utils.py](/opensora/utils/config_utils.py)). To change the sampling prompts, modify the `.txt` file passed to the `--prompt_path` argument. + +```plaintext +--prompt_path ./assets/texts/t2v_samples.txt -> prompt_path +--ckpt-path ./path/to/your/ckpt.pth -> model["from_pretrained"] +``` + +The explanation of each field is provided below.
+ +```python +# Define sampling size +num_frames = 64 # number of frames +fps = 24 // 2 # frames per second (divided by 2 for frame_interval=2) +image_size = (512, 512) # image size (height, width) + +# Define model +model = dict( + type="STDiT-XL/2", # Select model type (STDiT-XL/2, DiT-XL/2, etc.) + space_scale=1.0, # (Optional) Space positional encoding scale (new height / old height) + time_scale=2 / 3, # (Optional) Time positional encoding scale (new frame_interval / old frame_interval) + enable_flashattn=True, # (Optional) Speed up training and inference with flash attention + enable_layernorm_kernel=True, # (Optional) Speed up training and inference with fused kernel + from_pretrained="PRETRAINED_MODEL", # (Optional) Load from pretrained model + no_temporal_pos_emb=True, # (Optional) Disable temporal positional encoding (for image) +) +vae = dict( + type="VideoAutoencoderKL", # Select VAE type + from_pretrained="stabilityai/sd-vae-ft-ema", # Load from pretrained VAE + micro_batch_size=128, # VAE with micro batch size to save memory +) +text_encoder = dict( + type="t5", # Select text encoder type (t5, clip) + from_pretrained="./pretrained_models/t5_ckpts", # Load from pretrained text encoder + model_max_length=120, # Maximum length of input text +) +scheduler = dict( + type="iddpm", # Select scheduler type (iddpm, dpm-solver) + num_sampling_steps=100, # Number of sampling steps + cfg_scale=7.0, # hyper-parameter for classifier-free guidance +) +dtype = "fp16" # Computation type (fp16, fp32, bf16) + +# Other settings +batch_size = 1 # batch size +seed = 42 # random seed +prompt_path = "./assets/texts/t2v_samples.txt" # path to prompt file +save_dir = "./samples" # path to save samples +``` + +## Training config demos + +```python +# Define sampling size +num_frames = 64 +frame_interval = 2 # sample every 2 frames +image_size = (512, 512) + +# Define dataset +root = None # root path to the dataset +data_path = "CSV_PATH" # path to the csv file +use_image_transform = False # True if training on images +num_workers = 4 # number of workers for dataloader + +# Define acceleration +dtype = "bf16" # Computation type (fp16, bf16) +grad_checkpoint = True # Use gradient checkpointing +plugin = "zero2" # Plugin for distributed training (zero2, zero2-seq) +sp_size = 1 # Sequence parallelism size (1 for no sequence parallelism) + +# Define model +model = dict( + type="STDiT-XL/2", + space_scale=1.0, + time_scale=2 / 3, + from_pretrained="YOUR_PRETRAINED_MODEL", + enable_flashattn=True, # Enable flash attention + enable_layernorm_kernel=True, # Enable layernorm kernel +) +vae = dict( + type="VideoAutoencoderKL", + from_pretrained="stabilityai/sd-vae-ft-ema", + micro_batch_size=128, +) +text_encoder = dict( + type="t5", + from_pretrained="./pretrained_models/t5_ckpts", + model_max_length=120, + shardformer=True, # Enable shardformer for T5 acceleration +) +scheduler = dict( + type="iddpm", + timestep_respacing="", # Default 1000 timesteps +) + +# Others +seed = 42 +outputs = "outputs" # path to save checkpoints +wandb = False # Use wandb for logging + +epochs = 1000 # number of epochs (just large enough, kill when satisfied) +log_every = 10 +ckpt_every = 250 +load = None # path to resume training + +batch_size = 4 +lr = 2e-5 +grad_clip = 1.0 # gradient clipping +``` diff --git a/opensora/__init__.py b/opensora/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a3175b2df160a6b1215dc75eb1cc1a91bfc51ae0 --- /dev/null +++ b/opensora/__init__.py @@ -0,0 +1,4 @@ +from
.acceleration import * +from .datasets import * +from .models import * +from .registry import * diff --git a/opensora/acceleration/__init__.py b/opensora/acceleration/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/opensora/acceleration/checkpoint.py b/opensora/acceleration/checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..d832a0105ac278982feee34109bc585b4bf4d9d0 --- /dev/null +++ b/opensora/acceleration/checkpoint.py @@ -0,0 +1,24 @@ +from collections.abc import Iterable + +import torch.nn as nn +from torch.utils.checkpoint import checkpoint, checkpoint_sequential + + +def set_grad_checkpoint(model, use_fp32_attention=False, gc_step=1): + assert isinstance(model, nn.Module) + + def set_attr(module): + module.grad_checkpointing = True + module.fp32_attention = use_fp32_attention + module.grad_checkpointing_step = gc_step + + model.apply(set_attr) + + +def auto_grad_checkpoint(module, *args, **kwargs): + if getattr(module, "grad_checkpointing", False): + if not isinstance(module, Iterable): + return checkpoint(module, *args, **kwargs) + gc_step = module[0].grad_checkpointing_step + return checkpoint_sequential(module, gc_step, *args, **kwargs) + return module(*args, **kwargs) diff --git a/opensora/acceleration/communications.py b/opensora/acceleration/communications.py new file mode 100644 index 0000000000000000000000000000000000000000..d0900d20841248a250b5aeb31755fac689474ff8 --- /dev/null +++ b/opensora/acceleration/communications.py @@ -0,0 +1,188 @@ +import torch +import torch.distributed as dist + + +# ==================== +# All-To-All +# ==================== +def _all_to_all( + input_: torch.Tensor, + world_size: int, + group: dist.ProcessGroup, + scatter_dim: int, + gather_dim: int, +): + input_list = [t.contiguous() for t in torch.tensor_split(input_, world_size, scatter_dim)] + output_list = [torch.empty_like(input_list[0]) for _ in range(world_size)] + dist.all_to_all(output_list, input_list, group=group) + return torch.cat(output_list, dim=gather_dim).contiguous() + + +class _AllToAll(torch.autograd.Function): + """All-to-all communication. 
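+ + Each rank splits input_ into world_size chunks along scatter_dim, exchanges chunk i with rank i, and concatenates the received chunks along gather_dim.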
+ + Args: + input_: input matrix + process_group: communication group + scatter_dim: scatter dimension + gather_dim: gather dimension + """ + + @staticmethod + def forward(ctx, input_, process_group, scatter_dim, gather_dim): + ctx.process_group = process_group + ctx.scatter_dim = scatter_dim + ctx.gather_dim = gather_dim + ctx.world_size = dist.get_world_size(process_group) + output = _all_to_all(input_, ctx.world_size, process_group, scatter_dim, gather_dim) + return output + + @staticmethod + def backward(ctx, grad_output): + grad_output = _all_to_all( + grad_output, + ctx.world_size, + ctx.process_group, + ctx.gather_dim, + ctx.scatter_dim, + ) + return ( + grad_output, + None, + None, + None, + ) + + +def all_to_all( + input_: torch.Tensor, + process_group: dist.ProcessGroup, + scatter_dim: int = 2, + gather_dim: int = 1, +): + return _AllToAll.apply(input_, process_group, scatter_dim, gather_dim) + + +def _gather_into_list( + input_: torch.Tensor, + world_size: int, + group: dist.ProcessGroup, +): + # all-gather a copy of input_ from every rank into a python list + gather_list = [torch.empty_like(input_) for _ in range(world_size)] + dist.all_gather(gather_list, input_, group=group) + return gather_list + + +# ==================== +# Gather-Split +# ==================== + + +def _split(input_, pg: dist.ProcessGroup, dim=-1): + # skip if only one rank involved + world_size = dist.get_world_size(pg) + rank = dist.get_rank(pg) + if world_size == 1: + return input_ + + # Split along the given dimension. + dim_size = input_.size(dim) + assert dim_size % world_size == 0, ( + f"The dimension to split ({dim_size}) is not a multiple of world size ({world_size}), " + f"cannot split tensor evenly" + ) + + tensor_list = torch.split(input_, dim_size // world_size, dim=dim) + output = tensor_list[rank].contiguous() + + return output + + +def _gather(input_, pg: dist.ProcessGroup, dim=-1): + # skip if only one rank involved + input_ = input_.contiguous() + world_size = dist.get_world_size(pg) + + if world_size == 1: + return input_ + + # all gather + tensor_list = [torch.empty_like(input_) for _ in range(world_size)] + assert input_.device.type == "cuda" + torch.distributed.all_gather(tensor_list, input_, group=pg) + + # concat + output = torch.cat(tensor_list, dim=dim).contiguous() + + return output + + +class _GatherForwardSplitBackward(torch.autograd.Function): + """Gather the input from the model parallel region and concatenate. + + Args: + input_: input matrix. + process_group: process group. + dim: dimension + """ + + @staticmethod + def symbolic(graph, input_, process_group, dim, grad_scale): + return _gather(input_, process_group, dim) + + @staticmethod + def forward(ctx, input_, process_group, dim, grad_scale): + ctx.mode = process_group + ctx.dim = dim + ctx.grad_scale = grad_scale + return _gather(input_, process_group, dim) + + @staticmethod + def backward(ctx, grad_output): + if ctx.grad_scale == "up": + grad_output = grad_output * dist.get_world_size(ctx.mode) + elif ctx.grad_scale == "down": + grad_output = grad_output / dist.get_world_size(ctx.mode) + + return _split(grad_output, ctx.mode, ctx.dim), None, None, None + + +class _SplitForwardGatherBackward(torch.autograd.Function): + """ + Split the input and keep only the chunk corresponding to the rank. + + Args: + input_: input matrix. + process_group: process group.
+ dim: dimension + """ + + @staticmethod + def symbolic(graph, input_, process_group, dim, grad_scale): + return _split(input_, process_group, dim) + + @staticmethod + def forward(ctx, input_, process_group, dim, grad_scale): + ctx.mode = process_group + ctx.dim = dim + ctx.grad_scale = grad_scale + return _split(input_, process_group, dim) + + @staticmethod + def backward(ctx, grad_output): + if ctx.grad_scale == "up": + grad_output = grad_output * dist.get_world_size(ctx.mode) + elif ctx.grad_scale == "down": + grad_output = grad_output / dist.get_world_size(ctx.mode) + return _gather(grad_output, ctx.mode, ctx.dim), None, None, None + + +def split_forward_gather_backward(input_, process_group, dim, grad_scale=1.0): + return _SplitForwardGatherBackward.apply(input_, process_group, dim, grad_scale) + + +def gather_forward_split_backward(input_, process_group, dim, grad_scale=None): + return _GatherForwardSplitBackward.apply(input_, process_group, dim, grad_scale) diff --git a/opensora/acceleration/parallel_states.py b/opensora/acceleration/parallel_states.py new file mode 100644 index 0000000000000000000000000000000000000000..ff2893e33c86da4cb8a5170566917355af882825 --- /dev/null +++ b/opensora/acceleration/parallel_states.py @@ -0,0 +1,19 @@ +import torch.distributed as dist + +_GLOBAL_PARALLEL_GROUPS = dict() + + +def set_data_parallel_group(group: dist.ProcessGroup): + _GLOBAL_PARALLEL_GROUPS["data"] = group + + +def get_data_parallel_group(): + return _GLOBAL_PARALLEL_GROUPS.get("data", None) + + +def set_sequence_parallel_group(group: dist.ProcessGroup): + _GLOBAL_PARALLEL_GROUPS["sequence"] = group + + +def get_sequence_parallel_group(): + return _GLOBAL_PARALLEL_GROUPS.get("sequence", None) diff --git a/opensora/acceleration/plugin.py b/opensora/acceleration/plugin.py new file mode 100644 index 0000000000000000000000000000000000000000..c657a9539d8fb1f0d65e8f452777a4bb73a84d4d --- /dev/null +++ b/opensora/acceleration/plugin.py @@ -0,0 +1,100 @@ +import random +from typing import Optional + +import numpy as np +import torch +from colossalai.booster.plugin import LowLevelZeroPlugin +from colossalai.cluster import ProcessGroupMesh +from torch.utils.data import DataLoader +from torch.utils.data.distributed import DistributedSampler + +DP_AXIS, SP_AXIS = 0, 1 + + +class ZeroSeqParallelPlugin(LowLevelZeroPlugin): + def __init__( + self, + sp_size: int = 1, + stage: int = 2, + precision: str = "fp16", + initial_scale: float = 2**32, + min_scale: float = 1, + growth_factor: float = 2, + backoff_factor: float = 0.5, + growth_interval: int = 1000, + hysteresis: int = 2, + max_scale: float = 2**32, + max_norm: float = 0.0, + norm_type: float = 2.0, + reduce_bucket_size_in_m: int = 12, + communication_dtype: Optional[torch.dtype] = None, + overlap_communication: bool = True, + cpu_offload: bool = False, + master_weights: bool = True, + verbose: bool = False, + ) -> None: + super().__init__( + stage=stage, + precision=precision, + initial_scale=initial_scale, + min_scale=min_scale, + growth_factor=growth_factor, + backoff_factor=backoff_factor, + growth_interval=growth_interval, + hysteresis=hysteresis, + max_scale=max_scale, + max_norm=max_norm, + norm_type=norm_type, + reduce_bucket_size_in_m=reduce_bucket_size_in_m, + communication_dtype=communication_dtype, + overlap_communication=overlap_communication, + cpu_offload=cpu_offload, + master_weights=master_weights, + verbose=verbose, + ) + self.sp_size = sp_size + assert self.world_size % sp_size == 0, "world_size must be divisible by sp_size" + self.dp_size = self.world_size // sp_size +
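# Lay ranks out on a 2D (dp_size, sp_size) mesh: data-parallel groups run along DP_AXIS, while the ranks that jointly hold one sequence form groups along SP_AXIS. +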
self.pg_mesh = ProcessGroupMesh(self.dp_size, self.sp_size) + self.dp_group = self.pg_mesh.get_group_along_axis(DP_AXIS) + self.sp_group = self.pg_mesh.get_group_along_axis(SP_AXIS) + self.dp_rank = self.pg_mesh.coordinate(DP_AXIS) + self.sp_rank = self.pg_mesh.coordinate(SP_AXIS) + + def __del__(self): + """Destroy the process groups in ProcessGroupMesh""" + self.pg_mesh.destroy_mesh_process_groups() + + def prepare_dataloader( + self, + dataset, + batch_size, + shuffle=False, + seed=1024, + drop_last=False, + pin_memory=False, + num_workers=0, + distributed_sampler_cls=None, + **kwargs, + ): + _kwargs = kwargs.copy() + distributed_sampler_cls = distributed_sampler_cls or DistributedSampler + sampler = distributed_sampler_cls(dataset, num_replicas=self.dp_size, rank=self.dp_rank, shuffle=shuffle) + + # Deterministic dataloader + def seed_worker(worker_id): + worker_seed = seed + np.random.seed(worker_seed) + torch.manual_seed(worker_seed) + random.seed(worker_seed) + + return DataLoader( + dataset, + batch_size=batch_size, + sampler=sampler, + worker_init_fn=seed_worker, + drop_last=drop_last, + pin_memory=pin_memory, + num_workers=num_workers, + **_kwargs, + ) diff --git a/opensora/acceleration/shardformer/modeling/__init__.py b/opensora/acceleration/shardformer/modeling/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/opensora/acceleration/shardformer/modeling/t5.py b/opensora/acceleration/shardformer/modeling/t5.py new file mode 100644 index 0000000000000000000000000000000000000000..9cfb80841c92a57628fba81425627053afc76a3b --- /dev/null +++ b/opensora/acceleration/shardformer/modeling/t5.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn + + +class T5LayerNorm(nn.Module): + def __init__(self, hidden_size, eps=1e-6): + """ + Construct a layernorm module in the T5 style. No bias and no subtraction of mean. + """ + super().__init__() + self.weight = nn.Parameter(torch.ones(hidden_size)) + self.variance_epsilon = eps + + def forward(self, hidden_states): + # T5 uses a layer_norm which only scales and doesn't shift, which is also known as Root Mean + # Square Layer Normalization https://arxiv.org/abs/1910.07467; thus the variance is calculated + # w/o mean and there is no bias. Additionally we want to make sure that the accumulation for + # half-precision inputs is done in fp32 + + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon) + + # convert into half-precision if necessary + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return self.weight * hidden_states + + @staticmethod + def from_native_module(module, *args, **kwargs): + assert module.__class__.__name__ == "FusedRMSNorm", ( + "Recovering T5LayerNorm requires the original layer to be apex's Fused RMS Norm."
+ "Apex's fused norm is automatically used by Hugging Face Transformers https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py#L265C5-L265C48" + ) + + layer_norm = T5LayerNorm(module.normalized_shape, eps=module.eps) + layer_norm.weight.data.copy_(module.weight.data) + layer_norm = layer_norm.to(module.weight.device) + return layer_norm diff --git a/opensora/acceleration/shardformer/policy/__init__.py b/opensora/acceleration/shardformer/policy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/opensora/acceleration/shardformer/policy/t5_encoder.py b/opensora/acceleration/shardformer/policy/t5_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..85c994ecc1a911da5f76b23819148cb1e17b16fa --- /dev/null +++ b/opensora/acceleration/shardformer/policy/t5_encoder.py @@ -0,0 +1,67 @@ +from colossalai.shardformer.modeling.jit import get_jit_fused_dropout_add_func +from colossalai.shardformer.modeling.t5 import get_jit_fused_T5_layer_ff_forward, get_T5_layer_self_attention_forward +from colossalai.shardformer.policies.base_policy import Policy, SubModuleReplacementDescription + + +class T5EncoderPolicy(Policy): + def config_sanity_check(self): + assert not self.shard_config.enable_tensor_parallelism + assert not self.shard_config.enable_flash_attention + + def preprocess(self): + return self.model + + def module_policy(self): + from transformers.models.t5.modeling_t5 import T5LayerFF, T5LayerSelfAttention, T5Stack + + policy = {} + + # check whether apex is installed + try: + from opensora.acceleration.shardformer.modeling.t5 import T5LayerNorm + + # recover hf from fused rms norm to T5 norm which is faster + self.append_or_create_submodule_replacement( + description=SubModuleReplacementDescription( + suffix="layer_norm", + target_module=T5LayerNorm, + ), + policy=policy, + target_key=T5LayerFF, + ) + self.append_or_create_submodule_replacement( + description=SubModuleReplacementDescription(suffix="layer_norm", target_module=T5LayerNorm), + policy=policy, + target_key=T5LayerSelfAttention, + ) + self.append_or_create_submodule_replacement( + description=SubModuleReplacementDescription(suffix="final_layer_norm", target_module=T5LayerNorm), + policy=policy, + target_key=T5Stack, + ) + except (ImportError, ModuleNotFoundError): + pass + + # use jit operator + if self.shard_config.enable_jit_fused: + self.append_or_create_method_replacement( + description={ + "forward": get_jit_fused_T5_layer_ff_forward(), + "dropout_add": get_jit_fused_dropout_add_func(), + }, + policy=policy, + target_key=T5LayerFF, + ) + self.append_or_create_method_replacement( + description={ + "forward": get_T5_layer_self_attention_forward(), + "dropout_add": get_jit_fused_dropout_add_func(), + }, + policy=policy, + target_key=T5LayerSelfAttention, + ) + + return policy + + def postprocess(self): + return self.model diff --git a/opensora/datasets/__init__.py b/opensora/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9b33954089543694925a8ef96b1341a01cb7c70 --- /dev/null +++ b/opensora/datasets/__init__.py @@ -0,0 +1,2 @@ +from .datasets import DatasetFromCSV, get_transforms_image, get_transforms_video +from .utils import prepare_dataloader, save_sample diff --git a/opensora/datasets/datasets.py b/opensora/datasets/datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..9d9317271924d2c8a79778c2f69eb60d15343dda --- 
/dev/null +++ b/opensora/datasets/datasets.py @@ -0,0 +1,114 @@ +import csv +import os + +import numpy as np +import torch +import torchvision +import torchvision.transforms as transforms +from torchvision.datasets.folder import IMG_EXTENSIONS, pil_loader + +from . import video_transforms +from .utils import center_crop_arr + + +def get_transforms_video(resolution=256): + transform_video = transforms.Compose( + [ + video_transforms.ToTensorVideo(), # TCHW + video_transforms.RandomHorizontalFlipVideo(), + video_transforms.UCFCenterCropVideo(resolution), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ] + ) + return transform_video + + +def get_transforms_image(image_size=256): + transform = transforms.Compose( + [ + transforms.Lambda(lambda pil_image: center_crop_arr(pil_image, image_size)), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ] + ) + return transform + + +class DatasetFromCSV(torch.utils.data.Dataset): + """Load video (or image) samples according to a csv file, where each row is (path, text). + + Args: + csv_path (str): path to the csv file. + num_frames (int): number of video frames to load. + frame_interval (int): interval between sampled frames. + transform (callable): transform applied to each video or image. + root (str): optional root directory prepended to each sample path. + """ + + def __init__( + self, + csv_path, + num_frames=16, + frame_interval=1, + transform=None, + root=None, + ): + self.csv_path = csv_path + with open(csv_path, "r") as f: + reader = csv.reader(f) + self.samples = list(reader) + + ext = self.samples[0][0].split(".")[-1] + if ext.lower() in ("mp4", "avi", "mov", "mkv"): + self.is_video = True + else: + assert f".{ext.lower()}" in IMG_EXTENSIONS, f"Unsupported file format: {ext}" + self.is_video = False + + self.transform = transform + + self.num_frames = num_frames + self.frame_interval = frame_interval + self.temporal_sample = video_transforms.TemporalRandomCrop(num_frames * frame_interval) + self.root = root + + def getitem(self, index): + sample = self.samples[index] + path = sample[0] + if self.root: + path = os.path.join(self.root, path) + text = sample[1] + + if self.is_video: + vframes, aframes, info = torchvision.io.read_video(filename=path, pts_unit="sec", output_format="TCHW") + total_frames = len(vframes) + + # Sampling video frames + start_frame_ind, end_frame_ind = self.temporal_sample(total_frames) + assert ( + end_frame_ind - start_frame_ind >= self.num_frames + ), f"{path} with index {index} does not have enough frames."
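+ # Evenly spread self.num_frames frame indices across [start_frame_ind, end_frame_ind - 1].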
+ frame_indice = np.linspace(start_frame_ind, end_frame_ind - 1, self.num_frames, dtype=int) + + video = vframes[frame_indice] + video = self.transform(video) # T C H W + else: + image = pil_loader(path) + image = self.transform(image) + video = image.unsqueeze(0).repeat(self.num_frames, 1, 1, 1) + + # TCHW -> CTHW + video = video.permute(1, 0, 2, 3) + + return {"video": video, "text": text} + + def __getitem__(self, index): + for _ in range(10): + try: + return self.getitem(index) + except Exception as e: + print(e) + index = np.random.randint(len(self)) + raise RuntimeError("Too many bad data samples.") + + def __len__(self): + return len(self.samples) diff --git a/opensora/datasets/utils.py b/opensora/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cd268ae610ddb557a633c4c15b09ef71df92bdec --- /dev/null +++ b/opensora/datasets/utils.py @@ -0,0 +1,135 @@ +import random +from typing import Iterator, Optional + +import numpy as np +import torch +from PIL import Image +from torch.distributed import ProcessGroup +from torch.distributed.distributed_c10d import _get_default_group +from torch.utils.data import DataLoader, Dataset +from torch.utils.data.distributed import DistributedSampler +from torchvision.io import write_video +from torchvision.utils import save_image + + +def save_sample(x, fps=8, save_path=None, normalize=True, value_range=(-1, 1)): + """ + Args: + x (Tensor): shape [C, T, H, W] + """ + assert x.ndim == 4 + + if x.shape[1] == 1: # T = 1: save as image + save_path += ".png" + x = x.squeeze(1) + save_image([x], save_path, normalize=normalize, value_range=value_range) + else: + save_path += ".mp4" + if normalize: + low, high = value_range + x.clamp_(min=low, max=high) + x.sub_(low).div_(max(high - low, 1e-5)) + + x = x.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 3, 0).to("cpu", torch.uint8) + write_video(save_path, x, fps=fps, video_codec="h264") + print(f"Saved to {save_path}") + + +class StatefulDistributedSampler(DistributedSampler): + def __init__( + self, + dataset: Dataset, + num_replicas: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = True, + seed: int = 0, + drop_last: bool = False, + ) -> None: + super().__init__(dataset, num_replicas, rank, shuffle, seed, drop_last) + self.start_index: int = 0 + + def __iter__(self) -> Iterator: + iterator = super().__iter__() + indices = list(iterator) + indices = indices[self.start_index :] + return iter(indices) + + def __len__(self) -> int: + return self.num_samples - self.start_index + + def set_start_index(self, start_index: int) -> None: + self.start_index = start_index + + +def prepare_dataloader( + dataset, + batch_size, + shuffle=False, + seed=1024, + drop_last=False, + pin_memory=False, + num_workers=0, + process_group: Optional[ProcessGroup] = None, + **kwargs, +): + r""" + Prepare a dataloader for distributed training. The dataloader will be wrapped by + `torch.utils.data.DataLoader` and `StatefulDistributedSampler`. + + Args: + dataset (`torch.utils.data.Dataset`): The dataset to be loaded. + shuffle (bool, optional): Whether to shuffle the dataset. Defaults to False. + seed (int, optional): Random worker seed for sampling, defaults to 1024. + drop_last (bool, optional): Set to True to drop the last incomplete batch, if the dataset size + is not divisible by the batch size.
+            If False and the size of dataset is not divisible by the batch size,
+            then the last batch will be smaller; defaults to False.
+        pin_memory (bool, optional): Whether to pin memory address in CPU memory. Defaults to False.
+        num_workers (int, optional): Number of worker processes for this dataloader. Defaults to 0.
+        kwargs (dict): optional parameters for ``torch.utils.data.DataLoader``, more details can be found in
+            `DataLoader `_.
+
+    Returns:
+        :class:`torch.utils.data.DataLoader`: A DataLoader used for training or testing.
+    """
+    _kwargs = kwargs.copy()
+    process_group = process_group or _get_default_group()
+    sampler = StatefulDistributedSampler(
+        dataset, num_replicas=process_group.size(), rank=process_group.rank(), shuffle=shuffle
+    )
+
+    # Deterministic dataloader
+    def seed_worker(worker_id):
+        # Note: every worker is seeded with the same value on purpose,
+        # so runs are reproducible across restarts.
+        worker_seed = seed
+        np.random.seed(worker_seed)
+        torch.manual_seed(worker_seed)
+        random.seed(worker_seed)
+
+    return DataLoader(
+        dataset,
+        batch_size=batch_size,
+        sampler=sampler,
+        worker_init_fn=seed_worker,
+        drop_last=drop_last,
+        pin_memory=pin_memory,
+        num_workers=num_workers,
+        **_kwargs,
+    )
+
+
+def center_crop_arr(pil_image, image_size):
+    """
+    Center cropping implementation from ADM.
+    https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126
+    """
+    while min(*pil_image.size) >= 2 * image_size:
+        pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=Image.BOX)
+
+    scale = image_size / min(*pil_image.size)
+    pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC)
+
+    arr = np.array(pil_image)
+    crop_y = (arr.shape[0] - image_size) // 2
+    crop_x = (arr.shape[1] - image_size) // 2
+    return Image.fromarray(arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size])
diff --git a/opensora/datasets/video_transforms.py b/opensora/datasets/video_transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0d1cec8c72a3f1a5fb7d71489b61973d0178580
--- /dev/null
+++ b/opensora/datasets/video_transforms.py
@@ -0,0 +1,501 @@
+# Copyright 2024 Vchitect/Latte
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+# http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Modified from Latte
+# - This file is adapted from https://github.com/Vchitect/Latte/blob/main/datasets/video_transforms.py
+
+
+import numbers
+import random
+
+import numpy as np
+import torch
+from PIL import Image  # numpy and PIL are required by center_crop_arr below
+
+
+def _is_tensor_video_clip(clip):
+    if not torch.is_tensor(clip):
+        raise TypeError("clip should be Tensor. Got %s" % type(clip))
+
+    if not clip.ndimension() == 4:
+        raise ValueError("clip should be 4D. Got %dD" % clip.dim())
+
+    return True
+
+
+def center_crop_arr(pil_image, image_size):
+    """
+    Center cropping implementation from ADM.
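+    The short side is repeatedly halved with BOX resampling until it is below
+    2 * image_size, rescaled with BICUBIC so the short side equals image_size,
+    and finally center-cropped. For example (illustrative numbers), a 1280x720
+    frame with image_size=256 is downsampled to 640x360, rescaled to 455x256,
+    then cropped to 256x256.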
+ https://github.com/openai/guided-diffusion/blob/8fb3ad9197f16bbc40620447b2742e13458d2831/guided_diffusion/image_datasets.py#L126 + """ + while min(*pil_image.size) >= 2 * image_size: + pil_image = pil_image.resize(tuple(x // 2 for x in pil_image.size), resample=Image.BOX) + + scale = image_size / min(*pil_image.size) + pil_image = pil_image.resize(tuple(round(x * scale) for x in pil_image.size), resample=Image.BICUBIC) + + arr = np.array(pil_image) + crop_y = (arr.shape[0] - image_size) // 2 + crop_x = (arr.shape[1] - image_size) // 2 + return Image.fromarray(arr[crop_y : crop_y + image_size, crop_x : crop_x + image_size]) + + +def crop(clip, i, j, h, w): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) + """ + if len(clip.size()) != 4: + raise ValueError("clip should be a 4D tensor") + return clip[..., i : i + h, j : j + w] + + +def resize(clip, target_size, interpolation_mode): + if len(target_size) != 2: + raise ValueError(f"target size should be tuple (height, width), instead got {target_size}") + return torch.nn.functional.interpolate(clip, size=target_size, mode=interpolation_mode, align_corners=False) + + +def resize_scale(clip, target_size, interpolation_mode): + if len(target_size) != 2: + raise ValueError(f"target size should be tuple (height, width), instead got {target_size}") + H, W = clip.size(-2), clip.size(-1) + scale_ = target_size[0] / min(H, W) + return torch.nn.functional.interpolate(clip, scale_factor=scale_, mode=interpolation_mode, align_corners=False) + + +def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): + """ + Do spatial cropping and resizing to the video clip + Args: + clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) + i (int): i in (i,j) i.e coordinates of the upper left corner. + j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the cropped region. + w (int): Width of the cropped region. + size (tuple(int, int)): height and width of resized clip + Returns: + clip (torch.tensor): Resized and cropped clip. 
Size is (T, C, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + clip = crop(clip, i, j, h, w) + clip = resize(clip, size, interpolation_mode) + return clip + + +def center_crop(clip, crop_size): + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + h, w = clip.size(-2), clip.size(-1) + th, tw = crop_size + if h < th or w < tw: + raise ValueError("height and width must be no smaller than crop_size") + + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) + return crop(clip, i, j, th, tw) + + +def center_crop_using_short_edge(clip): + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + h, w = clip.size(-2), clip.size(-1) + if h < w: + th, tw = h, h + i = 0 + j = int(round((w - tw) / 2.0)) + else: + th, tw = w, w + i = int(round((h - th) / 2.0)) + j = 0 + return crop(clip, i, j, th, tw) + + +def random_shift_crop(clip): + """ + Slide along the long edge, with the short edge as crop size + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + h, w = clip.size(-2), clip.size(-1) + + if h <= w: + short_edge = h + else: + short_edge = w + + th, tw = short_edge, short_edge + + i = torch.randint(0, h - th + 1, size=(1,)).item() + j = torch.randint(0, w - tw + 1, size=(1,)).item() + return crop(clip, i, j, th, tw) + + +def to_tensor(clip): + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W) + Return: + clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W) + """ + _is_tensor_video_clip(clip) + if not clip.dtype == torch.uint8: + raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) + # return clip.float().permute(3, 0, 1, 2) / 255.0 + return clip.float() / 255.0 + + +def normalize(clip, mean, std, inplace=False): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (T, C, H, W) + mean (tuple): pixel RGB mean. Size is (3) + std (tuple): pixel standard deviation. Size is (3) + Returns: + normalized clip (torch.tensor): Size is (T, C, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + if not inplace: + clip = clip.clone() + mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) + # print(mean) + std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + return clip + + +def hflip(clip): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (T, C, H, W) + Returns: + flipped clip (torch.tensor): Size is (T, C, H, W) + """ + if not _is_tensor_video_clip(clip): + raise ValueError("clip should be a 4D torch.tensor") + return clip.flip(-1) + + +class RandomCropVideo: + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) + Returns: + torch.tensor: randomly cropped video clip. 
+                size is (T, C, OH, OW)
+        """
+        i, j, h, w = self.get_params(clip)
+        return crop(clip, i, j, h, w)
+
+    def get_params(self, clip):
+        h, w = clip.shape[-2:]
+        th, tw = self.size
+
+        if h < th or w < tw:
+            raise ValueError(f"Required crop size {(th, tw)} is larger than input image size {(h, w)}")
+
+        if w == tw and h == th:
+            return 0, 0, h, w
+
+        i = torch.randint(0, h - th + 1, size=(1,)).item()
+        j = torch.randint(0, w - tw + 1, size=(1,)).item()
+
+        return i, j, th, tw
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size})"
+
+
+class CenterCropResizeVideo:
+    """
+    First center-crop the video using the short side as the crop length,
+    then resize to the specified size.
+    """
+
+    def __init__(
+        self,
+        size,
+        interpolation_mode="bilinear",
+    ):
+        if isinstance(size, tuple):
+            if len(size) != 2:
+                raise ValueError(f"size should be tuple (height, width), instead got {size}")
+            self.size = size
+        else:
+            self.size = (size, size)
+
+        self.interpolation_mode = interpolation_mode
+
+    def __call__(self, clip):
+        """
+        Args:
+            clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
+        Returns:
+            torch.tensor: scale resized / center cropped video clip.
+                size is (T, C, crop_size, crop_size)
+        """
+        clip_center_crop = center_crop_using_short_edge(clip)
+        clip_center_crop_resize = resize(
+            clip_center_crop, target_size=self.size, interpolation_mode=self.interpolation_mode
+        )
+        return clip_center_crop_resize
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode})"
+
+
+class UCFCenterCropVideo:
+    """
+    First rescale so the short edge matches the specified size (keeping the
+    aspect ratio), then center-crop.
+    """
+
+    def __init__(
+        self,
+        size,
+        interpolation_mode="bilinear",
+    ):
+        if isinstance(size, tuple):
+            if len(size) != 2:
+                raise ValueError(f"size should be tuple (height, width), instead got {size}")
+            self.size = size
+        else:
+            self.size = (size, size)
+
+        self.interpolation_mode = interpolation_mode
+
+    def __call__(self, clip):
+        """
+        Args:
+            clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W)
+        Returns:
+            torch.tensor: scale resized / center cropped video clip.
+                size is (T, C, crop_size, crop_size)
+        """
+        clip_resize = resize_scale(clip=clip, target_size=self.size, interpolation_mode=self.interpolation_mode)
+        clip_center_crop = center_crop(clip_resize, self.size)
+        return clip_center_crop
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode})"
+
+
+class KineticsRandomCropResizeVideo:
+    """
+    Slide along the long edge, with the short edge as crop size, and resize to the desired size.
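+
+    For example (illustrative numbers): a (16, 3, 480, 640) clip is randomly
+    cropped to (16, 3, 480, 480) along the width, then resized to the target
+    size, e.g. 256x256.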
+ """ + + def __init__( + self, + size, + interpolation_mode="bilinear", + ): + if isinstance(size, tuple): + if len(size) != 2: + raise ValueError(f"size should be tuple (height, width), instead got {size}") + self.size = size + else: + self.size = (size, size) + + self.interpolation_mode = interpolation_mode + + def __call__(self, clip): + clip_random_crop = random_shift_crop(clip) + clip_resize = resize(clip_random_crop, self.size, self.interpolation_mode) + return clip_resize + + +class CenterCropVideo: + def __init__( + self, + size, + interpolation_mode="bilinear", + ): + if isinstance(size, tuple): + if len(size) != 2: + raise ValueError(f"size should be tuple (height, width), instead got {size}") + self.size = size + else: + self.size = (size, size) + + self.interpolation_mode = interpolation_mode + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (T, C, H, W) + Returns: + torch.tensor: center cropped video clip. + size is (T, C, crop_size, crop_size) + """ + clip_center_crop = center_crop(clip, self.size) + return clip_center_crop + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(size={self.size}, interpolation_mode={self.interpolation_mode}" + + +class NormalizeVideo: + """ + Normalize the video clip by mean subtraction and division by standard deviation + Args: + mean (3-tuple): pixel RGB mean + std (3-tuple): pixel RGB standard deviation + inplace (boolean): whether do in-place normalization + """ + + def __init__(self, mean, std, inplace=False): + self.mean = mean + self.std = std + self.inplace = inplace + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): video clip must be normalized. Size is (C, T, H, W) + """ + return normalize(clip, self.mean, self.std, self.inplace) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(mean={self.mean}, std={self.std}, inplace={self.inplace})" + + +class ToTensorVideo: + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + """ + + def __init__(self): + pass + + def __call__(self, clip): + """ + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, C, H, W) + Return: + clip (torch.tensor, dtype=torch.float): Size is (T, C, H, W) + """ + return to_tensor(clip) + + def __repr__(self) -> str: + return self.__class__.__name__ + + +class RandomHorizontalFlipVideo: + """ + Flip the video clip along the horizontal direction with a given probability + Args: + p (float): probability of the clip being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Size is (T, C, H, W) + Return: + clip (torch.tensor): Size is (T, C, H, W) + """ + if random.random() < self.p: + clip = hflip(clip) + return clip + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(p={self.p})" + + +# ------------------------------------------------------------ +# --------------------- Sampling --------------------------- +# ------------------------------------------------------------ +class TemporalRandomCrop(object): + """Temporally crop the given frame indices at a random location. + + Args: + size (int): Desired length of frames will be seen in the model. 
+ """ + + def __init__(self, size): + self.size = size + + def __call__(self, total_frames): + rand_end = max(0, total_frames - self.size - 1) + begin_index = random.randint(0, rand_end) + end_index = min(begin_index + self.size, total_frames) + return begin_index, end_index + + +if __name__ == "__main__": + import os + + import numpy as np + import torchvision.io as io + from torchvision import transforms + from torchvision.utils import save_image + + vframes, aframes, info = io.read_video(filename="./v_Archery_g01_c03.avi", pts_unit="sec", output_format="TCHW") + + trans = transforms.Compose( + [ + ToTensorVideo(), + RandomHorizontalFlipVideo(), + UCFCenterCropVideo(512), + # NormalizeVideo(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5], inplace=True), + ] + ) + + target_video_len = 32 + frame_interval = 1 + total_frames = len(vframes) + print(total_frames) + + temporal_sample = TemporalRandomCrop(target_video_len * frame_interval) + + # Sampling video frames + start_frame_ind, end_frame_ind = temporal_sample(total_frames) + # print(start_frame_ind) + # print(end_frame_ind) + assert end_frame_ind - start_frame_ind >= target_video_len + frame_indice = np.linspace(start_frame_ind, end_frame_ind - 1, target_video_len, dtype=int) + print(frame_indice) + + select_vframes = vframes[frame_indice] + print(select_vframes.shape) + print(select_vframes.dtype) + + select_vframes_trans = trans(select_vframes) + print(select_vframes_trans.shape) + print(select_vframes_trans.dtype) + + select_vframes_trans_int = ((select_vframes_trans * 0.5 + 0.5) * 255).to(dtype=torch.uint8) + print(select_vframes_trans_int.dtype) + print(select_vframes_trans_int.permute(0, 2, 3, 1).shape) + + io.write_video("./test.avi", select_vframes_trans_int.permute(0, 2, 3, 1), fps=8) + + for i in range(target_video_len): + save_image( + select_vframes_trans[i], os.path.join("./test000", "%04d.png" % i), normalize=True, value_range=(-1, 1) + ) diff --git a/opensora/models/__init__.py b/opensora/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..60253499b07d5c9f4e0848d1b76b26fa5d2ea048 --- /dev/null +++ b/opensora/models/__init__.py @@ -0,0 +1,6 @@ +from .dit import * +from .latte import * +from .pixart import * +from .stdit import * +from .text_encoder import * +from .vae import * diff --git a/opensora/models/dit/__init__.py b/opensora/models/dit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..94548a363f00ee5bbd7c5b38eaf53d26a4919b11 --- /dev/null +++ b/opensora/models/dit/__init__.py @@ -0,0 +1 @@ +from .dit import DiT, DiT_XL_2, DiT_XL_2x2 diff --git a/opensora/models/dit/dit.py b/opensora/models/dit/dit.py new file mode 100644 index 0000000000000000000000000000000000000000..a23dd7b56d74f6ea6575429b265a13cba88c64f1 --- /dev/null +++ b/opensora/models/dit/dit.py @@ -0,0 +1,284 @@ +# Modified from Meta DiT + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# -------------------------------------------------------- +# References: +# DiT: https://github.com/facebookresearch/DiT/tree/main +# GLIDE: https://github.com/openai/glide-text2im +# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py +# -------------------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +import torch.utils.checkpoint +from einops import rearrange +from timm.models.vision_transformer import Mlp + +from opensora.acceleration.checkpoint import auto_grad_checkpoint +from opensora.models.layers.blocks import ( + Attention, + CaptionEmbedder, + FinalLayer, + LabelEmbedder, + PatchEmbed3D, + TimestepEmbedder, + approx_gelu, + get_1d_sincos_pos_embed, + get_2d_sincos_pos_embed, + get_layernorm, + modulate, +) +from opensora.registry import MODELS +from opensora.utils.ckpt_utils import load_checkpoint + + +class DiTBlock(nn.Module): + """ + A DiT block with adaptive layer norm zero (adaLN-Zero) conditioning. + """ + + def __init__( + self, + hidden_size, + num_heads, + mlp_ratio=4.0, + enable_flashattn=False, + enable_layernorm_kernel=False, + ): + super().__init__() + self.hidden_size = hidden_size + self.num_heads = num_heads + self.enable_flashattn = enable_flashattn + mlp_hidden_dim = int(hidden_size * mlp_ratio) + + self.norm1 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.attn = Attention( + hidden_size, + num_heads=num_heads, + qkv_bias=True, + enable_flashattn=enable_flashattn, + ) + self.norm2 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.mlp = Mlp(in_features=hidden_size, hidden_features=mlp_hidden_dim, act_layer=approx_gelu, drop=0) + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True)) + + def forward(self, x, c): + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.adaLN_modulation(c).chunk(6, dim=1) + x = x + gate_msa.unsqueeze(1) * self.attn(modulate(self.norm1, x, shift_msa, scale_msa)) + x = x + gate_mlp.unsqueeze(1) * self.mlp(modulate(self.norm2, x, shift_mlp, scale_mlp)) + return x + + +@MODELS.register_module() +class DiT(nn.Module): + """ + Diffusion model with a Transformer backbone. 
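+
+    A minimal shape walk-through (illustrative; the values below are
+    assumptions for demonstration, not defaults this file mandates):
+
+        model = DiT(input_size=(16, 32, 32), in_channels=4)
+        model.eval()
+        x = torch.randn(2, 4, 16, 32, 32)  # (B, C, T, H, W) latents
+        t = torch.randint(0, 1000, (2,))   # diffusion timesteps
+        y = torch.randn(2, 1, 1, 512)      # pooled caption embeddings (caption_channels=512)
+        out = model(x, t, y)               # (2, 8, 16, 32, 32), since learn_sigma=True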
+ """ + + def __init__( + self, + input_size=(16, 32, 32), + in_channels=4, + patch_size=(1, 2, 2), + hidden_size=1152, + depth=28, + num_heads=16, + mlp_ratio=4.0, + class_dropout_prob=0.1, + learn_sigma=True, + condition="text", + no_temporal_pos_emb=False, + caption_channels=512, + model_max_length=77, + dtype=torch.float32, + enable_flashattn=False, + enable_layernorm_kernel=False, + ): + super().__init__() + self.learn_sigma = learn_sigma + self.in_channels = in_channels + self.out_channels = in_channels * 2 if learn_sigma else in_channels + self.hidden_size = hidden_size + self.patch_size = patch_size + self.input_size = input_size + num_patches = np.prod([input_size[i] // patch_size[i] for i in range(3)]) + self.num_patches = num_patches + self.num_temporal = input_size[0] // patch_size[0] + self.num_spatial = num_patches // self.num_temporal + self.num_heads = num_heads + self.dtype = dtype + self.use_text_encoder = not condition.startswith("label") + if enable_flashattn: + assert dtype in [ + torch.float16, + torch.bfloat16, + ], f"Flash attention only supports float16 and bfloat16, but got {self.dtype}" + self.no_temporal_pos_emb = no_temporal_pos_emb + self.mlp_ratio = mlp_ratio + self.depth = depth + + self.register_buffer("pos_embed_spatial", self.get_spatial_pos_embed()) + self.register_buffer("pos_embed_temporal", self.get_temporal_pos_embed()) + + self.x_embedder = PatchEmbed3D(patch_size, in_channels, embed_dim=hidden_size) + if not self.use_text_encoder: + num_classes = int(condition.split("_")[-1]) + self.y_embedder = LabelEmbedder(num_classes, hidden_size, class_dropout_prob) + else: + self.y_embedder = CaptionEmbedder( + in_channels=caption_channels, + hidden_size=hidden_size, + uncond_prob=class_dropout_prob, + act_layer=approx_gelu, + token_num=1, # pooled token + ) + self.t_embedder = TimestepEmbedder(hidden_size) + self.blocks = nn.ModuleList( + [ + DiTBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + enable_flashattn=enable_flashattn, + enable_layernorm_kernel=enable_layernorm_kernel, + ) + for _ in range(depth) + ] + ) + self.final_layer = FinalLayer(hidden_size, np.prod(self.patch_size), self.out_channels) + + self.initialize_weights() + self.enable_flashattn = enable_flashattn + self.enable_layernorm_kernel = enable_layernorm_kernel + + def get_spatial_pos_embed(self): + pos_embed = get_2d_sincos_pos_embed( + self.hidden_size, + self.input_size[1] // self.patch_size[1], + ) + pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False) + return pos_embed + + def get_temporal_pos_embed(self): + pos_embed = get_1d_sincos_pos_embed( + self.hidden_size, + self.input_size[0] // self.patch_size[0], + ) + pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False) + return pos_embed + + def unpatchify(self, x): + c = self.out_channels + t, h, w = [self.input_size[i] // self.patch_size[i] for i in range(3)] + pt, ph, pw = self.patch_size + + x = x.reshape(shape=(x.shape[0], t, h, w, pt, ph, pw, c)) + x = rearrange(x, "n t h w r p q c -> n c t r h p w q") + imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw)) + return imgs + + def forward(self, x, t, y): + """ + Forward pass of DiT. 
+        x: (B, C, T, H, W) tensor of inputs
+        t: (B,) tensor of diffusion timesteps
+        y: list of text
+        """
+        # original inputs should be float32, cast to specified dtype
+        x = x.to(self.dtype)
+
+        # embedding
+        x = self.x_embedder(x)  # (B, N, D)
+        x = rearrange(x, "b (t s) d -> b t s d", t=self.num_temporal, s=self.num_spatial)
+        x = x + self.pos_embed_spatial
+        if not self.no_temporal_pos_emb:
+            x = rearrange(x, "b t s d -> b s t d")
+            x = x + self.pos_embed_temporal
+            x = rearrange(x, "b s t d -> b (t s) d")
+        else:
+            x = rearrange(x, "b t s d -> b (t s) d")
+
+        t = self.t_embedder(t, dtype=x.dtype)  # (N, D)
+        y = self.y_embedder(y, self.training)  # (N, D)
+        if self.use_text_encoder:
+            y = y.squeeze(1).squeeze(1)
+        condition = t + y
+
+        # blocks
+        for _, block in enumerate(self.blocks):
+            c = condition
+            x = auto_grad_checkpoint(block, x, c)  # (B, N, D)
+
+        # final process
+        x = self.final_layer(x, condition)  # (B, N, num_patches * out_channels)
+        x = self.unpatchify(x)  # (B, out_channels, T, H, W)
+
+        # cast to float32 for better accuracy
+        x = x.to(torch.float32)
+        return x
+
+    def initialize_weights(self):
+        # Initialize transformer layers:
+        def _basic_init(module):
+            if isinstance(module, nn.Linear):
+                # fixed: `requires_grad_` is a bound method and was always truthy
+                if module.weight.requires_grad:
+                    torch.nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        self.apply(_basic_init)
+
+        # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+        w = self.x_embedder.proj.weight.data
+        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+        nn.init.constant_(self.x_embedder.proj.bias, 0)
+
+        # Initialize timestep embedding MLP:
+        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+
+        # Zero-out adaLN modulation layers in DiT blocks:
+        for block in self.blocks:
+            nn.init.constant_(block.adaLN_modulation[-1].weight, 0)
+            nn.init.constant_(block.adaLN_modulation[-1].bias, 0)
+
+        # Zero-out output layers:
+        nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0)
+        nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0)
+        nn.init.constant_(self.final_layer.linear.weight, 0)
+        nn.init.constant_(self.final_layer.linear.bias, 0)
+
+        # Zero-out text embedding layers:
+        if self.use_text_encoder:
+            nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)
+            nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)
+
+
+@MODELS.register_module("DiT-XL/2")
+def DiT_XL_2(from_pretrained=None, **kwargs):
+    model = DiT(
+        depth=28,
+        hidden_size=1152,
+        patch_size=(1, 2, 2),
+        num_heads=16,
+        **kwargs,
+    )
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
+
+
+@MODELS.register_module("DiT-XL/2x2")
+def DiT_XL_2x2(from_pretrained=None, **kwargs):
+    model = DiT(
+        depth=28,
+        hidden_size=1152,
+        patch_size=(2, 2, 2),
+        num_heads=16,
+        **kwargs,
+    )
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
diff --git a/opensora/models/latte/__init__.py b/opensora/models/latte/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9d918ad01c676a2c2c0dc25f68aa008101773d3
--- /dev/null
+++ b/opensora/models/latte/__init__.py
@@ -0,0 +1 @@
+from .latte import Latte, Latte_XL_2, Latte_XL_2x2
diff --git a/opensora/models/latte/latte.py b/opensora/models/latte/latte.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f8f9685e00b72e601f662b49925d82a57f9e253
--- /dev/null
+++ b/opensora/models/latte/latte.py
@@ -0,0 +1,112 @@
+# Copyright 2024 Vchitect/Latte
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from Latte
+#
+# This file is modified from https://github.com/Vchitect/Latte/blob/main/models/latte.py
+#
+# With references to:
+# Latte: https://github.com/Vchitect/Latte
+# DiT: https://github.com/facebookresearch/DiT/tree/main
+
+
+import torch
+from einops import rearrange, repeat
+
+from opensora.acceleration.checkpoint import auto_grad_checkpoint
+from opensora.models.dit import DiT
+from opensora.registry import MODELS
+from opensora.utils.ckpt_utils import load_checkpoint
+
+
+@MODELS.register_module()
+class Latte(DiT):
+    def forward(self, x, t, y):
+        """
+        Forward pass of Latte: spatial and temporal attention are interleaved,
+        with even blocks attending over space and odd blocks over time.
+        x: (B, C, T, H, W) tensor of inputs
+        t: (B,) tensor of diffusion timesteps
+        y: list of text
+        """
+        # original inputs should be float32, cast to specified dtype
+        x = x.to(self.dtype)
+
+        # embedding
+        x = self.x_embedder(x)  # (B, N, D)
+        x = rearrange(x, "b (t s) d -> b t s d", t=self.num_temporal, s=self.num_spatial)
+        x = x + self.pos_embed_spatial
+        x = rearrange(x, "b t s d -> b (t s) d")
+
+        t = self.t_embedder(t, dtype=x.dtype)  # (N, D)
+        y = self.y_embedder(y, self.training)  # (N, D)
+        if self.use_text_encoder:
+            y = y.squeeze(1).squeeze(1)
+        condition = t + y
+        condition_spatial = repeat(condition, "b d -> (b t) d", t=self.num_temporal)
+        condition_temporal = repeat(condition, "b d -> (b s) d", s=self.num_spatial)
+
+        # blocks
+        for i, block in enumerate(self.blocks):
+            if i % 2 == 0:
+                # spatial
+                x = rearrange(x, "b (t s) d -> (b t) s d", t=self.num_temporal, s=self.num_spatial)
+                c = condition_spatial
+            else:
+                # temporal
+                x = rearrange(x, "b (t s) d -> (b s) t d", t=self.num_temporal, s=self.num_spatial)
+                c = condition_temporal
+                if i == 1:
+                    x = x + self.pos_embed_temporal
+
+            x = auto_grad_checkpoint(block, x, c)  # (B, N, D)
+
+            if i % 2 == 0:
+                x = rearrange(x, "(b t) s d -> b (t s) d", t=self.num_temporal, s=self.num_spatial)
+            else:
+                x = rearrange(x, "(b s) t d -> b (t s) d", t=self.num_temporal, s=self.num_spatial)
+
+        # final process
+        x = self.final_layer(x, condition)  # (B, N, num_patches * out_channels)
+        x = self.unpatchify(x)  # (B, out_channels, T, H, W)
+
+        # cast to float32 for better accuracy
+        x = x.to(torch.float32)
+        return x
+
+
+@MODELS.register_module("Latte-XL/2")
+def Latte_XL_2(from_pretrained=None, **kwargs):
+    model = Latte(
+        depth=28,
+        hidden_size=1152,
+        patch_size=(1, 2, 2),
+        num_heads=16,
+        **kwargs,
+    )
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
+
+
+@MODELS.register_module("Latte-XL/2x2")
+def Latte_XL_2x2(from_pretrained=None, **kwargs):
+    model = Latte(
+        depth=28,
+        hidden_size=1152,
+        patch_size=(2, 2, 2),
+        num_heads=16,
+        **kwargs,
+    )
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
diff --git a/opensora/models/layers/__init__.py b/opensora/models/layers/__init__.py
new file mode 100644
index
0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/opensora/models/layers/blocks.py b/opensora/models/layers/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..0f8bd59689ef4f52de51a5239c3f1db421cf8e35 --- /dev/null +++ b/opensora/models/layers/blocks.py @@ -0,0 +1,589 @@ +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# PixArt: https://github.com/PixArt-alpha/PixArt-alpha +# Latte: https://github.com/Vchitect/Latte +# DiT: https://github.com/facebookresearch/DiT/tree/main +# GLIDE: https://github.com/openai/glide-text2im +# MAE: https://github.com/facebookresearch/mae/blob/main/models_mae.py +# -------------------------------------------------------- + +import math + +import numpy as np +import torch +import torch.distributed as dist +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint +import xformers.ops +from einops import rearrange +from timm.models.vision_transformer import Mlp + +from opensora.acceleration.communications import all_to_all, split_forward_gather_backward +from opensora.acceleration.parallel_states import get_sequence_parallel_group + +approx_gelu = lambda: nn.GELU(approximate="tanh") + + +def get_layernorm(hidden_size: torch.Tensor, eps: float, affine: bool, use_kernel: bool): + if use_kernel: + try: + from apex.normalization import FusedLayerNorm + + return FusedLayerNorm(hidden_size, elementwise_affine=affine, eps=eps) + except ImportError: + raise RuntimeError("FusedLayerNorm not available. Please install apex.") + else: + return nn.LayerNorm(hidden_size, eps, elementwise_affine=affine) + + +def modulate(norm_func, x, shift, scale): + # Suppose x is (B, N, D), shift is (B, D), scale is (B, D) + dtype = x.dtype + x = norm_func(x.to(torch.float32)).to(dtype) + x = x * (scale.unsqueeze(1) + 1) + shift.unsqueeze(1) + x = x.to(dtype) + return x + + +def t2i_modulate(x, shift, scale): + return x * (1 + scale) + shift + + +# =============================================== +# General-purpose Layers +# =============================================== + + +class PatchEmbed3D(nn.Module): + """Video to Patch Embedding. + + Args: + patch_size (int): Patch token size. Default: (2,4,4). + in_chans (int): Number of input video channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. 
Default: None + """ + + def __init__( + self, + patch_size=(2, 4, 4), + in_chans=3, + embed_dim=96, + norm_layer=None, + flatten=True, + ): + super().__init__() + self.patch_size = patch_size + self.flatten = flatten + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv3d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, D, H, W = x.size() + if W % self.patch_size[2] != 0: + x = F.pad(x, (0, self.patch_size[2] - W % self.patch_size[2])) + if H % self.patch_size[1] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[1] - H % self.patch_size[1])) + if D % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, 0, 0, self.patch_size[0] - D % self.patch_size[0])) + + x = self.proj(x) # (B C T H W) + if self.norm is not None: + D, Wh, Ww = x.size(2), x.size(3), x.size(4) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, D, Wh, Ww) + if self.flatten: + x = x.flatten(2).transpose(1, 2) # BCTHW -> BNC + return x + + +class Attention(nn.Module): + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_norm: bool = False, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + norm_layer: nn.Module = nn.LayerNorm, + enable_flashattn: bool = False, + ) -> None: + super().__init__() + assert dim % num_heads == 0, "dim should be divisible by num_heads" + self.dim = dim + self.num_heads = num_heads + self.head_dim = dim // num_heads + self.scale = self.head_dim**-0.5 + self.enable_flashattn = enable_flashattn + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity() + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + B, N, C = x.shape + qkv = self.qkv(x) + qkv_shape = (B, N, 3, self.num_heads, self.head_dim) + if self.enable_flashattn: + qkv_permute_shape = (2, 0, 1, 3, 4) + else: + qkv_permute_shape = (2, 0, 3, 1, 4) + qkv = qkv.view(qkv_shape).permute(qkv_permute_shape) + q, k, v = qkv.unbind(0) + q, k = self.q_norm(q), self.k_norm(k) + if self.enable_flashattn: + from flash_attn import flash_attn_func + + x = flash_attn_func( + q, + k, + v, + dropout_p=self.attn_drop.p if self.training else 0.0, + softmax_scale=self.scale, + ) + else: + dtype = q.dtype + q = q * self.scale + attn = q @ k.transpose(-2, -1) # translate attn to float32 + attn = attn.to(torch.float32) + attn = attn.softmax(dim=-1) + attn = attn.to(dtype) # cast back attn to original dtype + attn = self.attn_drop(attn) + x = attn @ v + + x_output_shape = (B, N, C) + if not self.enable_flashattn: + x = x.transpose(1, 2) + x = x.reshape(x_output_shape) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SeqParallelAttention(Attention): + def __init__( + self, + dim: int, + num_heads: int = 8, + qkv_bias: bool = False, + qk_norm: bool = False, + attn_drop: float = 0.0, + proj_drop: float = 0.0, + norm_layer: nn.Module = nn.LayerNorm, + enable_flashattn: bool = False, + ) -> None: + super().__init__( + dim=dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_norm=qk_norm, + attn_drop=attn_drop, + proj_drop=proj_drop, + norm_layer=norm_layer, + enable_flashattn=enable_flashattn, + ) + + def 
forward(self, x: torch.Tensor) -> torch.Tensor: + B, N, C = x.shape # for sequence parallel here, the N is a local sequence length + qkv = self.qkv(x) + qkv_shape = (B, N, 3, self.num_heads, self.head_dim) + + qkv = qkv.view(qkv_shape) + + sp_group = get_sequence_parallel_group() + + # apply all_to_all to gather sequence and split attention heads + # [B, SUB_N, 3, NUM_HEAD, HEAD_DIM] -> [B, N, 3, NUM_HEAD_PER_DEVICE, HEAD_DIM] + qkv = all_to_all(qkv, sp_group, scatter_dim=3, gather_dim=1) + + if self.enable_flashattn: + qkv_permute_shape = (2, 0, 1, 3, 4) # [3, B, N, NUM_HEAD_PER_DEVICE, HEAD_DIM] + else: + qkv_permute_shape = (2, 0, 3, 1, 4) # [3, B, NUM_HEAD_PER_DEVICE, N, HEAD_DIM] + qkv = qkv.permute(qkv_permute_shape) + + q, k, v = qkv.unbind(0) + q, k = self.q_norm(q), self.k_norm(k) + if self.enable_flashattn: + from flash_attn import flash_attn_func + + x = flash_attn_func( + q, + k, + v, + dropout_p=self.attn_drop.p if self.training else 0.0, + softmax_scale=self.scale, + ) + else: + dtype = q.dtype + q = q * self.scale + attn = q @ k.transpose(-2, -1) # translate attn to float32 + attn = attn.to(torch.float32) + attn = attn.softmax(dim=-1) + attn = attn.to(dtype) # cast back attn to original dtype + attn = self.attn_drop(attn) + x = attn @ v + + if not self.enable_flashattn: + x = x.transpose(1, 2) + + # apply all to all to gather back attention heads and split sequence + # [B, N, NUM_HEAD_PER_DEVICE, HEAD_DIM] -> [B, SUB_N, NUM_HEAD, HEAD_DIM] + x = all_to_all(x, sp_group, scatter_dim=1, gather_dim=2) + + # reshape outputs back to [B, N, C] + x_output_shape = (B, N, C) + x = x.reshape(x_output_shape) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class MultiHeadCrossAttention(nn.Module): + def __init__(self, d_model, num_heads, attn_drop=0.0, proj_drop=0.0): + super(MultiHeadCrossAttention, self).__init__() + assert d_model % num_heads == 0, "d_model must be divisible by num_heads" + + self.d_model = d_model + self.num_heads = num_heads + self.head_dim = d_model // num_heads + + self.q_linear = nn.Linear(d_model, d_model) + self.kv_linear = nn.Linear(d_model, d_model * 2) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(d_model, d_model) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, cond, mask=None): + # query/value: img tokens; key: condition; mask: if padding tokens + B, N, C = x.shape + + q = self.q_linear(x).view(1, -1, self.num_heads, self.head_dim) + kv = self.kv_linear(cond).view(1, -1, 2, self.num_heads, self.head_dim) + k, v = kv.unbind(2) + + attn_bias = None + if mask is not None: + attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask) + x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias) + + x = x.view(B, -1, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SeqParallelMultiHeadCrossAttention(MultiHeadCrossAttention): + def __init__( + self, + d_model, + num_heads, + attn_drop=0.0, + proj_drop=0.0, + ): + super().__init__(d_model=d_model, num_heads=num_heads, attn_drop=attn_drop, proj_drop=proj_drop) + + def forward(self, x, cond, mask=None): + # query/value: img tokens; key: condition; mask: if padding tokens + sp_group = get_sequence_parallel_group() + sp_size = dist.get_world_size(sp_group) + B, SUB_N, C = x.shape + N = SUB_N * sp_size + + # shape: + # q, k, v: [B, SUB_N, NUM_HEADS, HEAD_DIM] + q = self.q_linear(x).view(B, -1, self.num_heads, self.head_dim) + kv = self.kv_linear(cond).view(B, -1, 2, self.num_heads, self.head_dim) + k, v 
= kv.unbind(2) + + # apply all_to_all to gather sequence and split attention heads + q = all_to_all(q, sp_group, scatter_dim=2, gather_dim=1) + + k = split_forward_gather_backward(k, get_sequence_parallel_group(), dim=2, grad_scale="down") + v = split_forward_gather_backward(v, get_sequence_parallel_group(), dim=2, grad_scale="down") + + q = q.view(1, -1, self.num_heads // sp_size, self.head_dim) + k = k.view(1, -1, self.num_heads // sp_size, self.head_dim) + v = v.view(1, -1, self.num_heads // sp_size, self.head_dim) + + # compute attention + attn_bias = None + if mask is not None: + attn_bias = xformers.ops.fmha.BlockDiagonalMask.from_seqlens([N] * B, mask) + x = xformers.ops.memory_efficient_attention(q, k, v, p=self.attn_drop.p, attn_bias=attn_bias) + + # apply all to all to gather back attention heads and scatter sequence + x = x.view(B, -1, self.num_heads // sp_size, self.head_dim) + x = all_to_all(x, sp_group, scatter_dim=1, gather_dim=2) + + # apply output projection + x = x.view(B, -1, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class FinalLayer(nn.Module): + """ + The final layer of DiT. + """ + + def __init__(self, hidden_size, num_patch, out_channels): + super().__init__() + self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear(hidden_size, num_patch * out_channels, bias=True) + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True)) + + def forward(self, x, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=1) + x = modulate(self.norm_final, x, shift, scale) + x = self.linear(x) + return x + + +class T2IFinalLayer(nn.Module): + """ + The final layer of PixArt. + """ + + def __init__(self, hidden_size, num_patch, out_channels): + super().__init__() + self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6) + self.linear = nn.Linear(hidden_size, num_patch * out_channels, bias=True) + self.scale_shift_table = nn.Parameter(torch.randn(2, hidden_size) / hidden_size**0.5) + self.out_channels = out_channels + + def forward(self, x, t): + shift, scale = (self.scale_shift_table[None] + t[:, None]).chunk(2, dim=1) + x = t2i_modulate(self.norm_final(x), shift, scale) + x = self.linear(x) + return x + + +# =============================================== +# Embedding Layers for Timesteps and Class Labels +# =============================================== + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + + @staticmethod + def timestep_embedding(t, dim, max_period=10000): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
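+        For example (illustrative): timestep_embedding(torch.tensor([0.0, 10.0]), 256)
+        returns a (2, 256) tensor whose first 128 columns are cos(t * f_i) and
+        last 128 are sin(t * f_i), with frequencies f_i = 10000^(-i/128).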
+ """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + half = dim // 2 + freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half) + freqs = freqs.to(device=t.device) + args = t[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + return embedding + + def forward(self, t, dtype): + t_freq = self.timestep_embedding(t, self.frequency_embedding_size) + if t_freq.dtype != dtype: + t_freq = t_freq.to(dtype) + t_emb = self.mlp(t_freq) + return t_emb + + +class LabelEmbedder(nn.Module): + """ + Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance. + """ + + def __init__(self, num_classes, hidden_size, dropout_prob): + super().__init__() + use_cfg_embedding = dropout_prob > 0 + self.embedding_table = nn.Embedding(num_classes + use_cfg_embedding, hidden_size) + self.num_classes = num_classes + self.dropout_prob = dropout_prob + + def token_drop(self, labels, force_drop_ids=None): + """ + Drops labels to enable classifier-free guidance. + """ + if force_drop_ids is None: + drop_ids = torch.rand(labels.shape[0]).cuda() < self.dropout_prob + else: + drop_ids = force_drop_ids == 1 + labels = torch.where(drop_ids, self.num_classes, labels) + return labels + + def forward(self, labels, train, force_drop_ids=None): + use_dropout = self.dropout_prob > 0 + if (train and use_dropout) or (force_drop_ids is not None): + labels = self.token_drop(labels, force_drop_ids) + return self.embedding_table(labels) + + +class SizeEmbedder(TimestepEmbedder): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__(self, hidden_size, frequency_embedding_size=256): + super().__init__(hidden_size=hidden_size, frequency_embedding_size=frequency_embedding_size) + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, hidden_size, bias=True), + nn.SiLU(), + nn.Linear(hidden_size, hidden_size, bias=True), + ) + self.frequency_embedding_size = frequency_embedding_size + self.outdim = hidden_size + + def forward(self, s, bs): + if s.ndim == 1: + s = s[:, None] + assert s.ndim == 2 + if s.shape[0] != bs: + s = s.repeat(bs // s.shape[0], 1) + assert s.shape[0] == bs + b, dims = s.shape[0], s.shape[1] + s = rearrange(s, "b d -> (b d)") + s_freq = self.timestep_embedding(s, self.frequency_embedding_size).to(self.dtype) + s_emb = self.mlp(s_freq) + s_emb = rearrange(s_emb, "(b d) d2 -> b (d d2)", b=b, d=dims, d2=self.outdim) + return s_emb + + @property + def dtype(self): + return next(self.parameters()).dtype + + +class CaptionEmbedder(nn.Module): + """ + Embeds class labels into vector representations. Also handles label dropout for classifier-free guidance. + """ + + def __init__(self, in_channels, hidden_size, uncond_prob, act_layer=nn.GELU(approximate="tanh"), token_num=120): + super().__init__() + self.y_proj = Mlp( + in_features=in_channels, hidden_features=hidden_size, out_features=hidden_size, act_layer=act_layer, drop=0 + ) + self.register_buffer("y_embedding", nn.Parameter(torch.randn(token_num, in_channels) / in_channels**0.5)) + self.uncond_prob = uncond_prob + + def token_drop(self, caption, force_drop_ids=None): + """ + Drops labels to enable classifier-free guidance. 
+ """ + if force_drop_ids is None: + drop_ids = torch.rand(caption.shape[0]).cuda() < self.uncond_prob + else: + drop_ids = force_drop_ids == 1 + caption = torch.where(drop_ids[:, None, None, None], self.y_embedding, caption) + return caption + + def forward(self, caption, train, force_drop_ids=None): + if train: + assert caption.shape[2:] == self.y_embedding.shape + use_dropout = self.uncond_prob > 0 + if (train and use_dropout) or (force_drop_ids is not None): + caption = self.token_drop(caption, force_drop_ids) + caption = self.y_proj(caption) + return caption + + +# =============================================== +# Sine/Cosine Positional Embedding Functions +# =============================================== +# https://github.com/facebookresearch/mae/blob/main/util/pos_embed.py + + +def get_2d_sincos_pos_embed(embed_dim, grid_size, cls_token=False, extra_tokens=0, scale=1.0, base_size=None): + """ + grid_size: int of the grid height and width + return: + pos_embed: [grid_size*grid_size, embed_dim] or [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + if not isinstance(grid_size, tuple): + grid_size = (grid_size, grid_size) + + grid_h = np.arange(grid_size[0], dtype=np.float32) / scale + grid_w = np.arange(grid_size[1], dtype=np.float32) / scale + if base_size is not None: + grid_h *= base_size / grid_size[0] + grid_w *= base_size / grid_size[1] + grid = np.meshgrid(grid_w, grid_h) # here w goes first + grid = np.stack(grid, axis=0) + + grid = grid.reshape([2, 1, grid_size[1], grid_size[0]]) + pos_embed = get_2d_sincos_pos_embed_from_grid(embed_dim, grid) + if cls_token and extra_tokens > 0: + pos_embed = np.concatenate([np.zeros([extra_tokens, embed_dim]), pos_embed], axis=0) + return pos_embed + + +def get_2d_sincos_pos_embed_from_grid(embed_dim, grid): + assert embed_dim % 2 == 0 + + # use half of dimensions to encode grid_h + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[0]) # (H*W, D/2) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 2, grid[1]) # (H*W, D/2) + + emb = np.concatenate([emb_h, emb_w], axis=1) # (H*W, D) + return emb + + +def get_1d_sincos_pos_embed(embed_dim, length, scale=1.0): + pos = np.arange(0, length)[..., None] / scale + return get_1d_sincos_pos_embed_from_grid(embed_dim, pos) + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position + pos: a list of positions to be encoded: size (M,) + out: (M, D) + """ + assert embed_dim % 2 == 0 + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos = pos.reshape(-1) # (M,) + out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=1) # (M, D) + return emb diff --git a/opensora/models/pixart/__init__.py b/opensora/models/pixart/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cf8320211a82bd0a4689b2afa9b600adeee6cfeb --- /dev/null +++ b/opensora/models/pixart/__init__.py @@ -0,0 +1 @@ +from .pixart import PixArt, PixArt_XL_2 diff --git a/opensora/models/pixart/pixart.py b/opensora/models/pixart/pixart.py new file mode 100644 index 0000000000000000000000000000000000000000..849470ae438aebec8b700e429a03857d9125e76f --- /dev/null +++ b/opensora/models/pixart/pixart.py @@ -0,0 +1,389 @@ +# Adapted from PixArt +# +# Copyright (C) 2023 PixArt-alpha/PixArt-alpha +# +# This program is free software: you can 
redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. +# +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# PixArt: https://github.com/PixArt-alpha/PixArt-alpha +# DiT: https://github.com/facebookresearch/DiT/tree/main +# -------------------------------------------------------- + +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from timm.models.layers import DropPath +from timm.models.vision_transformer import Mlp + +# from .builder import MODELS +from opensora.acceleration.checkpoint import auto_grad_checkpoint +from opensora.models.layers.blocks import ( + Attention, + CaptionEmbedder, + MultiHeadCrossAttention, + PatchEmbed3D, + SeqParallelAttention, + SeqParallelMultiHeadCrossAttention, + SizeEmbedder, + T2IFinalLayer, + TimestepEmbedder, + approx_gelu, + get_1d_sincos_pos_embed, + get_2d_sincos_pos_embed, + get_layernorm, + t2i_modulate, +) +from opensora.registry import MODELS +from opensora.utils.ckpt_utils import load_checkpoint + + +class PixArtBlock(nn.Module): + """ + A PixArt block with adaptive layer norm (adaLN-single) conditioning. + """ + + def __init__( + self, + hidden_size, + num_heads, + mlp_ratio=4.0, + drop_path=0.0, + enable_flashattn=False, + enable_layernorm_kernel=False, + enable_sequence_parallelism=False, + ): + super().__init__() + self.hidden_size = hidden_size + self.enable_flashattn = enable_flashattn + self._enable_sequence_parallelism = enable_sequence_parallelism + + if enable_sequence_parallelism: + self.attn_cls = SeqParallelAttention + self.mha_cls = SeqParallelMultiHeadCrossAttention + else: + self.attn_cls = Attention + self.mha_cls = MultiHeadCrossAttention + + self.norm1 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.attn = self.attn_cls( + hidden_size, + num_heads=num_heads, + qkv_bias=True, + enable_flashattn=enable_flashattn, + ) + self.cross_attn = self.mha_cls(hidden_size, num_heads) + self.norm2 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.mlp = Mlp( + in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0 + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size**0.5) + + def forward(self, x, y, t, mask=None): + B, N, C = x.shape + + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( + self.scale_shift_table[None] + t.reshape(B, 6, -1) + ).chunk(6, dim=1) + x = x + self.drop_path(gate_msa * self.attn(t2i_modulate(self.norm1(x), shift_msa, scale_msa)).reshape(B, N, C)) + x = x + self.cross_attn(x, y, mask) + x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) + + return x + + +@MODELS.register_module() +class PixArt(nn.Module): + """ + Diffusion model with a Transformer backbone. 
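+
+    A minimal shape walk-through (illustrative; shapes and values are
+    assumptions for demonstration, not requirements of this file):
+
+        model = PixArt(input_size=(1, 32, 32), caption_channels=4096)
+        model.eval()
+        x = torch.randn(2, 4, 1, 32, 32)  # (B, C, T, H, W) latents
+        t = torch.randint(0, 1000, (2,))
+        y = torch.randn(2, 1, 120, 4096)  # caption token embeddings (model_max_length=120)
+        out = model(x, t, y)              # (2, 8, 1, 32, 32), since pred_sigma=True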
+ """ + + def __init__( + self, + input_size=(1, 32, 32), + in_channels=4, + patch_size=(1, 2, 2), + hidden_size=1152, + depth=28, + num_heads=16, + mlp_ratio=4.0, + class_dropout_prob=0.1, + pred_sigma=True, + drop_path: float = 0.0, + no_temporal_pos_emb=False, + caption_channels=4096, + model_max_length=120, + dtype=torch.float32, + freeze=None, + space_scale=1.0, + time_scale=1.0, + enable_flashattn=False, + enable_layernorm_kernel=False, + ): + super().__init__() + self.pred_sigma = pred_sigma + self.in_channels = in_channels + self.out_channels = in_channels * 2 if pred_sigma else in_channels + self.hidden_size = hidden_size + self.patch_size = patch_size + self.input_size = input_size + num_patches = np.prod([input_size[i] // patch_size[i] for i in range(3)]) + self.num_patches = num_patches + self.num_temporal = input_size[0] // patch_size[0] + self.num_spatial = num_patches // self.num_temporal + self.base_size = int(np.sqrt(self.num_spatial)) + self.num_heads = num_heads + self.dtype = dtype + self.no_temporal_pos_emb = no_temporal_pos_emb + self.depth = depth + self.mlp_ratio = mlp_ratio + self.enable_flashattn = enable_flashattn + self.enable_layernorm_kernel = enable_layernorm_kernel + self.space_scale = space_scale + self.time_scale = time_scale + + self.x_embedder = PatchEmbed3D(patch_size, in_channels, hidden_size) + self.t_embedder = TimestepEmbedder(hidden_size) + self.t_block = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True)) + self.y_embedder = CaptionEmbedder( + in_channels=caption_channels, + hidden_size=hidden_size, + uncond_prob=class_dropout_prob, + act_layer=approx_gelu, + token_num=model_max_length, + ) + + self.register_buffer("pos_embed", self.get_spatial_pos_embed()) + self.register_buffer("pos_embed_temporal", self.get_temporal_pos_embed()) + + drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule + self.blocks = nn.ModuleList( + [ + PixArtBlock( + hidden_size, + num_heads, + mlp_ratio=mlp_ratio, + drop_path=drop_path[i], + enable_flashattn=enable_flashattn, + enable_layernorm_kernel=enable_layernorm_kernel, + ) + for i in range(depth) + ] + ) + self.final_layer = T2IFinalLayer(hidden_size, np.prod(self.patch_size), self.out_channels) + + self.initialize_weights() + if freeze is not None: + assert freeze in ["text"] + if freeze == "text": + self.freeze_text() + + def forward(self, x, timestep, y, mask=None): + """ + Forward pass of PixArt. 
+        x: (N, C, T, H, W) tensor of spatial-temporal inputs (latent representations of videos)
+        t: (N,) tensor of diffusion timesteps
+        y: (N, 1, 120, C_caption) tensor of caption embeddings
+        """
+        x = x.to(self.dtype)
+        timestep = timestep.to(self.dtype)
+        y = y.to(self.dtype)
+
+        # embedding
+        x = self.x_embedder(x)  # (B, N, D)
+        x = rearrange(x, "b (t s) d -> b t s d", t=self.num_temporal, s=self.num_spatial)
+        x = x + self.pos_embed
+        if not self.no_temporal_pos_emb:
+            x = rearrange(x, "b t s d -> b s t d")
+            x = x + self.pos_embed_temporal
+            x = rearrange(x, "b s t d -> b (t s) d")
+        else:
+            x = rearrange(x, "b t s d -> b (t s) d")
+
+        t = self.t_embedder(timestep, dtype=x.dtype)  # (N, D)
+        t0 = self.t_block(t)
+        y = self.y_embedder(y, self.training)  # (N, 1, L, D)
+        if mask is not None:
+            if mask.shape[0] != y.shape[0]:
+                mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
+            mask = mask.squeeze(1).squeeze(1)
+            y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])
+            y_lens = mask.sum(dim=1).tolist()
+        else:
+            y_lens = [y.shape[2]] * y.shape[0]
+            y = y.squeeze(1).view(1, -1, x.shape[-1])
+
+        # blocks
+        for block in self.blocks:
+            x = auto_grad_checkpoint(block, x, y, t0, y_lens)
+
+        # final process
+        x = self.final_layer(x, t)  # (N, T, patch_size ** 2 * out_channels)
+        x = self.unpatchify(x)  # (N, out_channels, T, H, W)
+
+        # cast to float32 for better accuracy
+        x = x.to(torch.float32)
+        return x
+
+    def unpatchify(self, x):
+        c = self.out_channels
+        t, h, w = [self.input_size[i] // self.patch_size[i] for i in range(3)]
+        pt, ph, pw = self.patch_size
+
+        x = x.reshape(shape=(x.shape[0], t, h, w, pt, ph, pw, c))
+        x = rearrange(x, "n t h w r p q c -> n c t r h p w q")
+        imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw))
+        return imgs
+
+    def get_spatial_pos_embed(self, grid_size=None):
+        if grid_size is None:
+            grid_size = self.input_size[1:]
+        pos_embed = get_2d_sincos_pos_embed(
+            self.hidden_size,
+            (grid_size[0] // self.patch_size[1], grid_size[1] // self.patch_size[2]),
+            scale=self.space_scale,
+            base_size=self.base_size,
+        )
+        pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False)
+        return pos_embed
+
+    def get_temporal_pos_embed(self):
+        pos_embed = get_1d_sincos_pos_embed(
+            self.hidden_size,
+            self.input_size[0] // self.patch_size[0],
+            scale=self.time_scale,
+        )
+        pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False)
+        return pos_embed
+
+    def freeze_text(self):
+        for n, p in self.named_parameters():
+            if "cross_attn" in n:
+                p.requires_grad = False
+
+    def initialize_weights(self):
+        # Initialize transformer layers:
+        def _basic_init(module):
+            if isinstance(module, nn.Linear):
+                torch.nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        self.apply(_basic_init)
+
+        # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+        w = self.x_embedder.proj.weight.data
+        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+
+        # Initialize timestep embedding MLP:
+        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+        nn.init.normal_(self.t_block[1].weight, std=0.02)
+
+        # Initialize caption embedding MLP:
+        nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)
+        nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)
+
+        # Zero-out the cross-attention output projection in PixArt blocks:
+        for block in self.blocks:
+            nn.init.constant_(block.cross_attn.proj.weight, 0)
+            nn.init.constant_(block.cross_attn.proj.bias, 0)
+
+        # Zero-out output layers:
+        nn.init.constant_(self.final_layer.linear.weight, 0)
+        nn.init.constant_(self.final_layer.linear.bias, 0)
+
+
+@MODELS.register_module()
+class PixArtMS(PixArt):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+
+        assert self.hidden_size % 3 == 0, "hidden_size must be divisible by 3"
+        self.csize_embedder = SizeEmbedder(self.hidden_size // 3)
+        self.ar_embedder = SizeEmbedder(self.hidden_size // 3)
+
+    def forward(self, x, timestep, y, mask=None, data_info=None):
+        """
+        Forward pass of PixArtMS.
+        x: (N, C, T, H, W) tensor of spatial-temporal inputs (latent representations of videos)
+        t: (N,) tensor of diffusion timesteps
+        y: (N, 1, 120, C_caption) tensor of caption embeddings
+        data_info: dict with the size conditioning entries "hw" and "ar" (aspect ratio)
+        """
+        x = x.to(self.dtype)
+        timestep = timestep.to(self.dtype)
+        y = y.to(self.dtype)
+
+        c_size = data_info["hw"]
+        ar = data_info["ar"]
+        pos_embed = self.get_spatial_pos_embed((x.shape[-2], x.shape[-1])).to(x.dtype)
+
+        # embedding
+        x = self.x_embedder(x)  # (B, N, D)
+        x = rearrange(x, "b (t s) d -> b t s d", t=self.num_temporal, s=self.num_spatial)
+        x = x + pos_embed.to(x.device)
+        if not self.no_temporal_pos_emb:
+            x = rearrange(x, "b t s d -> b s t d")
+            x = x + self.pos_embed_temporal
+            x = rearrange(x, "b s t d -> b (t s) d")
+        else:
+            x = rearrange(x, "b t s d -> b (t s) d")
+
+        t = self.t_embedder(timestep, dtype=x.dtype)  # (N, D)
+        B = x.shape[0]
+        csize = self.csize_embedder(c_size, B)
+        ar = self.ar_embedder(ar, B)
+        t = t + torch.cat([csize, ar], dim=1)
+
+        t0 = self.t_block(t)
+        y = self.y_embedder(y, self.training)  # (N, 1, L, D)
+        if mask is not None:
+            if mask.shape[0] != y.shape[0]:
+                mask = mask.repeat(y.shape[0] // mask.shape[0], 1)
+            mask = mask.squeeze(1).squeeze(1)
+            y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])
+            y_lens = mask.sum(dim=1).tolist()
+        else:
+            y_lens = [y.shape[2]] * y.shape[0]
+            y = y.squeeze(1).view(1, -1, x.shape[-1])
+
+        # blocks
+        for block in self.blocks:
+            x = block(x, y, t0, y_lens)
+
+        # final process
+        x = self.final_layer(x, t)  # (N, T, patch_size ** 2 * out_channels)
+        x = self.unpatchify(x)  # (N, out_channels, T, H, W)
+
+        # cast to float32 for better accuracy
+        x = x.to(torch.float32)
+        return x
+
+
+@MODELS.register_module("PixArt-XL/2")
+def PixArt_XL_2(from_pretrained=None, **kwargs):
+    model = PixArt(depth=28, hidden_size=1152, patch_size=(1, 2, 2), num_heads=16, **kwargs)
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
+
+
+@MODELS.register_module("PixArtMS-XL/2")
+def PixArtMS_XL_2(from_pretrained=None, **kwargs):
+    model = PixArtMS(depth=28, hidden_size=1152, patch_size=(1, 2, 2), num_heads=16, **kwargs)
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
diff --git a/opensora/models/stdit/__init__.py b/opensora/models/stdit/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ca2cc91f8316e80a7e594e432431913b447207b
--- /dev/null
+++ b/opensora/models/stdit/__init__.py
@@ -0,0 +1 @@
+from .stdit import STDiT
diff --git a/opensora/models/stdit/stdit.py b/opensora/models/stdit/stdit.py
new file mode 100644
index 0000000000000000000000000000000000000000..68db681544a1ceded922ee450cf26bb3e1185a5f
--- /dev/null
+++ b/opensora/models/stdit/stdit.py
@@ -0,0 +1,388 @@
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from einops import rearrange
+from timm.models.layers import DropPath
+from timm.models.vision_transformer import Mlp + +from opensora.acceleration.checkpoint import auto_grad_checkpoint +from opensora.acceleration.communications import gather_forward_split_backward, split_forward_gather_backward +from opensora.acceleration.parallel_states import get_sequence_parallel_group +from opensora.models.layers.blocks import ( + Attention, + CaptionEmbedder, + MultiHeadCrossAttention, + PatchEmbed3D, + SeqParallelAttention, + SeqParallelMultiHeadCrossAttention, + T2IFinalLayer, + TimestepEmbedder, + approx_gelu, + get_1d_sincos_pos_embed, + get_2d_sincos_pos_embed, + get_layernorm, + t2i_modulate, +) +from opensora.registry import MODELS +from opensora.utils.ckpt_utils import load_checkpoint + + +class STDiTBlock(nn.Module): + def __init__( + self, + hidden_size, + num_heads, + d_s=None, + d_t=None, + mlp_ratio=4.0, + drop_path=0.0, + enable_flashattn=False, + enable_layernorm_kernel=False, + enable_sequence_parallelism=False, + ): + super().__init__() + self.hidden_size = hidden_size + self.enable_flashattn = enable_flashattn + self._enable_sequence_parallelism = enable_sequence_parallelism + + if enable_sequence_parallelism: + self.attn_cls = SeqParallelAttention + self.mha_cls = SeqParallelMultiHeadCrossAttention + else: + self.attn_cls = Attention + self.mha_cls = MultiHeadCrossAttention + + self.norm1 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.attn = self.attn_cls( + hidden_size, + num_heads=num_heads, + qkv_bias=True, + enable_flashattn=enable_flashattn, + ) + self.cross_attn = self.mha_cls(hidden_size, num_heads) + self.norm2 = get_layernorm(hidden_size, eps=1e-6, affine=False, use_kernel=enable_layernorm_kernel) + self.mlp = Mlp( + in_features=hidden_size, hidden_features=int(hidden_size * mlp_ratio), act_layer=approx_gelu, drop=0 + ) + self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() + self.scale_shift_table = nn.Parameter(torch.randn(6, hidden_size) / hidden_size**0.5) + + # temporal attention + self.d_s = d_s + self.d_t = d_t + + if self._enable_sequence_parallelism: + sp_size = dist.get_world_size(get_sequence_parallel_group()) + # make sure d_t is divisible by sp_size + assert d_t % sp_size == 0 + self.d_t = d_t // sp_size + + self.attn_temp = self.attn_cls( + hidden_size, + num_heads=num_heads, + qkv_bias=True, + enable_flashattn=self.enable_flashattn, + ) + + def forward(self, x, y, t, mask=None, tpe=None): + B, N, C = x.shape + + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( + self.scale_shift_table[None] + t.reshape(B, 6, -1) + ).chunk(6, dim=1) + x_m = t2i_modulate(self.norm1(x), shift_msa, scale_msa) + + # spatial branch + x_s = rearrange(x_m, "B (T S) C -> (B T) S C", T=self.d_t, S=self.d_s) + x_s = self.attn(x_s) + x_s = rearrange(x_s, "(B T) S C -> B (T S) C", T=self.d_t, S=self.d_s) + x = x + self.drop_path(gate_msa * x_s) + + # temporal branch + x_t = rearrange(x, "B (T S) C -> (B S) T C", T=self.d_t, S=self.d_s) + if tpe is not None: + x_t = x_t + tpe + x_t = self.attn_temp(x_t) + x_t = rearrange(x_t, "(B S) T C -> B (T S) C", T=self.d_t, S=self.d_s) + x = x + self.drop_path(gate_msa * x_t) + + # cross attn + x = x + self.cross_attn(x, y, mask) + + # mlp + x = x + self.drop_path(gate_mlp * self.mlp(t2i_modulate(self.norm2(x), shift_mlp, scale_mlp))) + + return x + + +@MODELS.register_module() +class STDiT(nn.Module): + def __init__( + self, + input_size=(1, 32, 32), + in_channels=4, + patch_size=(1, 2, 2), + hidden_size=1152, + depth=28, + 
num_heads=16, + mlp_ratio=4.0, + class_dropout_prob=0.1, + pred_sigma=True, + drop_path=0.0, + no_temporal_pos_emb=False, + caption_channels=4096, + model_max_length=120, + dtype=torch.float32, + space_scale=1.0, + time_scale=1.0, + freeze=None, + enable_flashattn=False, + enable_layernorm_kernel=False, + enable_sequence_parallelism=False, + ): + super().__init__() + self.pred_sigma = pred_sigma + self.in_channels = in_channels + self.out_channels = in_channels * 2 if pred_sigma else in_channels + self.hidden_size = hidden_size + self.patch_size = patch_size + self.input_size = input_size + num_patches = np.prod([input_size[i] // patch_size[i] for i in range(3)]) + self.num_patches = num_patches + self.num_temporal = input_size[0] // patch_size[0] + self.num_spatial = num_patches // self.num_temporal + self.num_heads = num_heads + self.dtype = dtype + self.no_temporal_pos_emb = no_temporal_pos_emb + self.depth = depth + self.mlp_ratio = mlp_ratio + self.enable_flashattn = enable_flashattn + self.enable_layernorm_kernel = enable_layernorm_kernel + self.space_scale = space_scale + self.time_scale = time_scale + + self.register_buffer("pos_embed", self.get_spatial_pos_embed()) + self.register_buffer("pos_embed_temporal", self.get_temporal_pos_embed()) + + self.x_embedder = PatchEmbed3D(patch_size, in_channels, hidden_size) + self.t_embedder = TimestepEmbedder(hidden_size) + self.t_block = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 6 * hidden_size, bias=True)) + self.y_embedder = CaptionEmbedder( + in_channels=caption_channels, + hidden_size=hidden_size, + uncond_prob=class_dropout_prob, + act_layer=approx_gelu, + token_num=model_max_length, + ) + + drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] + self.blocks = nn.ModuleList( + [ + STDiTBlock( + self.hidden_size, + self.num_heads, + mlp_ratio=self.mlp_ratio, + drop_path=drop_path[i], + enable_flashattn=self.enable_flashattn, + enable_layernorm_kernel=self.enable_layernorm_kernel, + enable_sequence_parallelism=enable_sequence_parallelism, + d_t=self.num_temporal, + d_s=self.num_spatial, + ) + for i in range(self.depth) + ] + ) + self.final_layer = T2IFinalLayer(hidden_size, np.prod(self.patch_size), self.out_channels) + + # init model + self.initialize_weights() + self.initialize_temporal() + if freeze is not None: + assert freeze in ["not_temporal", "text"] + if freeze == "not_temporal": + self.freeze_not_temporal() + elif freeze == "text": + self.freeze_text() + + # sequence parallel related configs + self.enable_sequence_parallelism = enable_sequence_parallelism + if enable_sequence_parallelism: + self.sp_rank = dist.get_rank(get_sequence_parallel_group()) + else: + self.sp_rank = None + + def forward(self, x, timestep, y, mask=None): + """ + Forward pass of STDiT. 
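+
+        A shape sketch for orientation (illustrative only, not a tested doctest;
+        assumes the default input_size=(1, 32, 32), in_channels=4, pred_sigma=True
+        and no sequence parallelism):
+
+            >>> model = STDiT()
+            >>> x = torch.randn(2, 4, 1, 32, 32)  # [B, C, T, H, W]
+            >>> timestep = torch.randint(0, 1000, (2,))  # [B]
+            >>> y = torch.randn(2, 1, 120, 4096)  # [B, 1, N_token, C_caption]
+            >>> model(x, timestep, y).shape  # sigma prediction doubles the channels
+            torch.Size([2, 8, 1, 32, 32])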
+ Args: + x (torch.Tensor): latent representation of video; of shape [B, C, T, H, W] + timestep (torch.Tensor): diffusion time steps; of shape [B] + y (torch.Tensor): representation of prompts; of shape [B, 1, N_token, C] + mask (torch.Tensor): mask for selecting prompt tokens; of shape [B, N_token] + + Returns: + x (torch.Tensor): output latent representation; of shape [B, C, T, H, W] + """ + + x = x.to(self.dtype) + timestep = timestep.to(self.dtype) + y = y.to(self.dtype) + + # embedding + x = self.x_embedder(x) # [B, N, C] + x = rearrange(x, "B (T S) C -> B T S C", T=self.num_temporal, S=self.num_spatial) + x = x + self.pos_embed + x = rearrange(x, "B T S C -> B (T S) C") + + # shard over the sequence dim if sp is enabled + if self.enable_sequence_parallelism: + x = split_forward_gather_backward(x, get_sequence_parallel_group(), dim=1, grad_scale="down") + + t = self.t_embedder(timestep, dtype=x.dtype) # [B, C] + t0 = self.t_block(t) # [B, C] + y = self.y_embedder(y, self.training) # [B, 1, N_token, C] + + if mask is not None: + if mask.shape[0] != y.shape[0]: + mask = mask.repeat(y.shape[0] // mask.shape[0], 1) + mask = mask.squeeze(1).squeeze(1) + y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1]) + y_lens = mask.sum(dim=1).tolist() + else: + y_lens = [y.shape[2]] * y.shape[0] + y = y.squeeze(1).view(1, -1, x.shape[-1]) + + # blocks + for i, block in enumerate(self.blocks): + if i == 0: + if self.enable_sequence_parallelism: + tpe = torch.chunk( + self.pos_embed_temporal, dist.get_world_size(get_sequence_parallel_group()), dim=1 + )[self.sp_rank].contiguous() + else: + tpe = self.pos_embed_temporal + else: + tpe = None + x = auto_grad_checkpoint(block, x, y, t0, y_lens, tpe) + + if self.enable_sequence_parallelism: + x = gather_forward_split_backward(x, get_sequence_parallel_group(), dim=1, grad_scale="up") + # x.shape: [B, N, C] + + # final process + x = self.final_layer(x, t) # [B, N, C=T_p * H_p * W_p * C_out] + x = self.unpatchify(x) # [B, C_out, T, H, W] + + # cast to float32 for better accuracy + x = x.to(torch.float32) + return x + + def unpatchify(self, x): + """ + Args: + x (torch.Tensor): of shape [B, N, C] + + Return: + x (torch.Tensor): of shape [B, C_out, T, H, W] + """ + + N_t, N_h, N_w = [self.input_size[i] // self.patch_size[i] for i in range(3)] + T_p, H_p, W_p = self.patch_size + x = rearrange( + x, + "B (N_t N_h N_w) (T_p H_p W_p C_out) -> B C_out (N_t T_p) (N_h H_p) (N_w W_p)", + N_t=N_t, + N_h=N_h, + N_w=N_w, + T_p=T_p, + H_p=H_p, + W_p=W_p, + C_out=self.out_channels, + ) + return x + + def unpatchify_old(self, x): + c = self.out_channels + t, h, w = [self.input_size[i] // self.patch_size[i] for i in range(3)] + pt, ph, pw = self.patch_size + + x = x.reshape(shape=(x.shape[0], t, h, w, pt, ph, pw, c)) + x = rearrange(x, "n t h w r p q c -> n c t r h p w q") + imgs = x.reshape(shape=(x.shape[0], c, t * pt, h * ph, w * pw)) + return imgs + + def get_spatial_pos_embed(self, grid_size=None): + if grid_size is None: + grid_size = self.input_size[1:] + pos_embed = get_2d_sincos_pos_embed( + self.hidden_size, + (grid_size[0] // self.patch_size[1], grid_size[1] // self.patch_size[2]), + scale=self.space_scale, + ) + pos_embed = torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False) + return pos_embed + + def get_temporal_pos_embed(self): + pos_embed = get_1d_sincos_pos_embed( + self.hidden_size, + self.input_size[0] // self.patch_size[0], + scale=self.time_scale, + ) + pos_embed = 
torch.from_numpy(pos_embed).float().unsqueeze(0).requires_grad_(False)
+        return pos_embed
+
+    def freeze_not_temporal(self):
+        for n, p in self.named_parameters():
+            if "attn_temp" not in n:
+                p.requires_grad = False
+
+    def freeze_text(self):
+        for n, p in self.named_parameters():
+            if "cross_attn" in n:
+                p.requires_grad = False
+
+    def initialize_temporal(self):
+        for block in self.blocks:
+            nn.init.constant_(block.attn_temp.proj.weight, 0)
+            nn.init.constant_(block.attn_temp.proj.bias, 0)
+
+    def initialize_weights(self):
+        # Initialize transformer layers:
+        def _basic_init(module):
+            if isinstance(module, nn.Linear):
+                torch.nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        self.apply(_basic_init)
+
+        # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):
+        w = self.x_embedder.proj.weight.data
+        nn.init.xavier_uniform_(w.view([w.shape[0], -1]))
+
+        # Initialize timestep embedding MLP:
+        nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)
+        nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)
+        nn.init.normal_(self.t_block[1].weight, std=0.02)
+
+        # Initialize caption embedding MLP:
+        nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)
+        nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)
+
+        # Zero-out the cross-attention output projection in STDiT blocks:
+        for block in self.blocks:
+            nn.init.constant_(block.cross_attn.proj.weight, 0)
+            nn.init.constant_(block.cross_attn.proj.bias, 0)
+
+        # Zero-out output layers:
+        nn.init.constant_(self.final_layer.linear.weight, 0)
+        nn.init.constant_(self.final_layer.linear.bias, 0)
+
+
+@MODELS.register_module("STDiT-XL/2")
+def STDiT_XL_2(from_pretrained=None, **kwargs):
+    model = STDiT(depth=28, hidden_size=1152, patch_size=(1, 2, 2), num_heads=16, **kwargs)
+    if from_pretrained is not None:
+        load_checkpoint(model, from_pretrained)
+    return model
diff --git a/opensora/models/text_encoder/__init__.py b/opensora/models/text_encoder/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fc6a9995d9652099a51159907eb1ebb7cc219c2
--- /dev/null
+++ b/opensora/models/text_encoder/__init__.py
@@ -0,0 +1,3 @@
+from .classes import ClassEncoder
+from .clip import ClipEncoder
+from .t5 import T5Encoder
diff --git a/opensora/models/text_encoder/classes.py b/opensora/models/text_encoder/classes.py
new file mode 100644
index 0000000000000000000000000000000000000000..f02c9f299f9a611f62141d063a80f38cd1b34b45
--- /dev/null
+++ b/opensora/models/text_encoder/classes.py
@@ -0,0 +1,20 @@
+import torch
+
+from opensora.registry import MODELS
+
+
+@MODELS.register_module("classes")
+class ClassEncoder:
+    def __init__(self, num_classes, model_max_length=None, device="cuda", dtype=torch.float):
+        self.num_classes = num_classes
+        self.y_embedder = None
+
+        self.model_max_length = model_max_length
+        self.output_dim = None
+        self.device = device
+
+    def encode(self, text):
+        return dict(y=torch.tensor([int(t) for t in text]).to(self.device))
+
+    def null(self, n):
+        return torch.tensor([self.num_classes] * n).to(self.device)
diff --git a/opensora/models/text_encoder/clip.py b/opensora/models/text_encoder/clip.py
new file mode 100644
index 0000000000000000000000000000000000000000..c628d02bb1aab2c6ee74be1daa0ec824dda160ff
--- /dev/null
+++ b/opensora/models/text_encoder/clip.py
@@ -0,0 +1,114 @@
+# Copyright 2024 Vchitect/Latte
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Modified from Latte
+#
+# This file is adapted from the Latte project.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# Latte: https://github.com/Vchitect/Latte
+# DiT: https://github.com/facebookresearch/DiT/tree/main
+# --------------------------------------------------------
+
+
+import torch
+import torch.nn as nn
+import transformers
+from transformers import CLIPTextModel, CLIPTokenizer
+
+from opensora.registry import MODELS
+
+transformers.logging.set_verbosity_error()
+
+
+class AbstractEncoder(nn.Module):
+    def __init__(self):
+        super().__init__()
+
+    def encode(self, *args, **kwargs):
+        raise NotImplementedError
+
+
+class FrozenCLIPEmbedder(AbstractEncoder):
+    """Uses the CLIP transformer encoder for text (from Hugging Face)."""
+
+    def __init__(self, path="openai/clip-vit-huge-patch14", device="cuda", max_length=77):
+        super().__init__()
+        self.tokenizer = CLIPTokenizer.from_pretrained(path)
+        self.transformer = CLIPTextModel.from_pretrained(path)
+        self.device = device
+        self.max_length = max_length
+        self._freeze()
+
+    def _freeze(self):
+        self.transformer = self.transformer.eval()
+        for param in self.parameters():
+            param.requires_grad = False
+
+    def forward(self, text):
+        batch_encoding = self.tokenizer(
+            text,
+            truncation=True,
+            max_length=self.max_length,
+            return_length=True,
+            return_overflowing_tokens=False,
+            padding="max_length",
+            return_tensors="pt",
+        )
+        tokens = batch_encoding["input_ids"].to(self.device)
+        outputs = self.transformer(input_ids=tokens)
+
+        z = outputs.last_hidden_state
+        pooled_z = outputs.pooler_output
+        return z, pooled_z
+
+    def encode(self, text):
+        return self(text)
+
+
+@MODELS.register_module("clip")
+class ClipEncoder:
+    """
+    Embeds text prompts into vector representations. Also handles text dropout for classifier-free guidance.
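+
+    A rough usage sketch (illustrative only; the checkpoint path below is a
+    stand-in for whatever CLIP text encoder the config supplies, D = 512 assumes
+    a ViT-B/32 text tower, and the default device is CUDA):
+
+        >>> enc = ClipEncoder(from_pretrained="openai/clip-vit-base-patch32")
+        >>> enc.encode(["a cat playing piano"])["y"].shape  # pooled, (N, 1, 1, D)
+        torch.Size([1, 1, 1, 512])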
+    """
+
+    def __init__(
+        self,
+        from_pretrained,
+        model_max_length=77,
+        device="cuda",
+        dtype=torch.float,
+    ):
+        super().__init__()
+        assert from_pretrained is not None, "Please specify the path to the CLIP model"
+
+        self.text_encoder = FrozenCLIPEmbedder(path=from_pretrained, max_length=model_max_length).to(device, dtype)
+        self.y_embedder = None
+
+        self.model_max_length = model_max_length
+        self.output_dim = self.text_encoder.transformer.config.hidden_size
+
+    def encode(self, text):
+        _, pooled_embeddings = self.text_encoder.encode(text)
+        y = pooled_embeddings.unsqueeze(1).unsqueeze(1)
+        return dict(y=y)
+
+    def null(self, n):
+        null_y = self.y_embedder.y_embedding[None].repeat(n, 1, 1)[:, None]
+        return null_y
+
+    def to(self, dtype):
+        self.text_encoder = self.text_encoder.to(dtype)
+        return self
diff --git a/opensora/models/text_encoder/t5.py b/opensora/models/text_encoder/t5.py
new file mode 100644
index 0000000000000000000000000000000000000000..f93612a759cb03d0a3e3fc8e72a44b181b6fe80a
--- /dev/null
+++ b/opensora/models/text_encoder/t5.py
@@ -0,0 +1,358 @@
+# Adapted from PixArt
+#
+# Copyright (C) 2023 PixArt-alpha/PixArt-alpha
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# -------------------------------------------------------- +# References: +# PixArt: https://github.com/PixArt-alpha/PixArt-alpha +# T5: https://github.com/google-research/text-to-text-transfer-transformer +# -------------------------------------------------------- + + +import html +import os +import re +import urllib.parse as ul + +import ftfy +import torch +from bs4 import BeautifulSoup +from huggingface_hub import hf_hub_download +from transformers import AutoTokenizer, T5EncoderModel + +from opensora.registry import MODELS + + +class T5Embedder: + available_models = ["t5-v1_1-xxl"] + bad_punct_regex = re.compile( + r"[" + "#®•©™&@·º½¾¿¡§~" + "\)" + "\(" + "\]" + "\[" + "\}" + "\{" + "\|" + "\\" + "\/" + "\*" + r"]{1,}" + ) # noqa + + def __init__( + self, + device, + dir_or_name="t5-v1_1-xxl", + *, + local_cache=False, + cache_dir=None, + hf_token=None, + use_text_preprocessing=True, + t5_model_kwargs=None, + torch_dtype=None, + use_offload_folder=None, + model_max_length=120, + ): + self.device = torch.device(device) + self.torch_dtype = torch_dtype or torch.bfloat16 + if t5_model_kwargs is None: + t5_model_kwargs = {"low_cpu_mem_usage": True, "torch_dtype": self.torch_dtype} + if use_offload_folder is not None: + t5_model_kwargs["offload_folder"] = use_offload_folder + t5_model_kwargs["device_map"] = { + "shared": self.device, + "encoder.embed_tokens": self.device, + "encoder.block.0": self.device, + "encoder.block.1": self.device, + "encoder.block.2": self.device, + "encoder.block.3": self.device, + "encoder.block.4": self.device, + "encoder.block.5": self.device, + "encoder.block.6": self.device, + "encoder.block.7": self.device, + "encoder.block.8": self.device, + "encoder.block.9": self.device, + "encoder.block.10": self.device, + "encoder.block.11": self.device, + "encoder.block.12": "disk", + "encoder.block.13": "disk", + "encoder.block.14": "disk", + "encoder.block.15": "disk", + "encoder.block.16": "disk", + "encoder.block.17": "disk", + "encoder.block.18": "disk", + "encoder.block.19": "disk", + "encoder.block.20": "disk", + "encoder.block.21": "disk", + "encoder.block.22": "disk", + "encoder.block.23": "disk", + "encoder.final_layer_norm": "disk", + "encoder.dropout": "disk", + } + else: + t5_model_kwargs["device_map"] = {"shared": self.device, "encoder": self.device} + + self.use_text_preprocessing = use_text_preprocessing + self.hf_token = hf_token + self.cache_dir = cache_dir or os.path.expanduser("~/.cache/IF_") + self.dir_or_name = dir_or_name + tokenizer_path, path = dir_or_name, dir_or_name + if local_cache: + cache_dir = os.path.join(self.cache_dir, dir_or_name) + tokenizer_path, path = cache_dir, cache_dir + elif dir_or_name in self.available_models: + cache_dir = os.path.join(self.cache_dir, dir_or_name) + for filename in [ + "config.json", + "special_tokens_map.json", + "spiece.model", + "tokenizer_config.json", + "pytorch_model.bin.index.json", + "pytorch_model-00001-of-00002.bin", + "pytorch_model-00002-of-00002.bin", + ]: + hf_hub_download( + repo_id=f"DeepFloyd/{dir_or_name}", + filename=filename, + cache_dir=cache_dir, + force_filename=filename, + token=self.hf_token, + ) + tokenizer_path, path = cache_dir, cache_dir + else: + cache_dir = os.path.join(self.cache_dir, "t5-v1_1-xxl") + for filename in [ + "config.json", + "special_tokens_map.json", + "spiece.model", + "tokenizer_config.json", + ]: + hf_hub_download( + repo_id="DeepFloyd/t5-v1_1-xxl", + filename=filename, + cache_dir=cache_dir, + force_filename=filename, + token=self.hf_token, + ) + 
tokenizer_path = cache_dir
+
+        print(tokenizer_path)
+        self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_path)
+        self.model = T5EncoderModel.from_pretrained(path, **t5_model_kwargs).eval()
+        self.model_max_length = model_max_length
+
+    def get_text_embeddings(self, texts):
+        texts = [self.text_preprocessing(text) for text in texts]
+
+        text_tokens_and_mask = self.tokenizer(
+            texts,
+            max_length=self.model_max_length,
+            padding="max_length",
+            truncation=True,
+            return_attention_mask=True,
+            add_special_tokens=True,
+            return_tensors="pt",
+        )
+
+        with torch.no_grad():
+            text_encoder_embs = self.model(
+                input_ids=text_tokens_and_mask["input_ids"].to(self.device),
+                attention_mask=text_tokens_and_mask["attention_mask"].to(self.device),
+            )["last_hidden_state"].detach()
+        return text_encoder_embs, text_tokens_and_mask["attention_mask"].to(self.device)
+
+    def text_preprocessing(self, text):
+        if self.use_text_preprocessing:
+            # The exact text cleaning as used in the training stage:
+            text = self.clean_caption(text)
+            text = self.clean_caption(text)
+            return text
+        else:
+            return text.lower().strip()
+
+    @staticmethod
+    def basic_clean(text):
+        text = ftfy.fix_text(text)
+        text = html.unescape(html.unescape(text))
+        return text.strip()
+
+    def clean_caption(self, caption):
+        caption = str(caption)
+        caption = ul.unquote_plus(caption)
+        caption = caption.strip().lower()
+        caption = re.sub("<person>", "person", caption)
+        # urls:
+        caption = re.sub(
+            r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        caption = re.sub(
+            r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))",  # noqa
+            "",
+            caption,
+        )  # regex for urls
+        # html:
+        caption = BeautifulSoup(caption, features="html.parser").text
+
+        # @<nickname>
+        caption = re.sub(r"@[\w\d]+\b", "", caption)
+
+        # 31C0—31EF CJK Strokes
+        # 31F0—31FF Katakana Phonetic Extensions
+        # 3200—32FF Enclosed CJK Letters and Months
+        # 3300—33FF CJK Compatibility
+        # 3400—4DBF CJK Unified Ideographs Extension A
+        # 4DC0—4DFF Yijing Hexagram Symbols
+        # 4E00—9FFF CJK Unified Ideographs
+        caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
+        caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
+        caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
+        caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
+        caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
+        caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
+        caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
+        #######################################################
+
+        # all types of dash --> "-"
+        caption = re.sub(
+            r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+",  # noqa
+            "-",
+            caption,
+        )
+
+        # normalize quotation marks to one standard
+        caption = re.sub(r"[`´«»“”¨]", '"', caption)
+        caption = re.sub(r"[‘’]", "'", caption)
+
+        # &quot;
+        caption = re.sub(r"&quot;?", "", caption)
+        # &amp
+        caption = re.sub(r"&amp", "", caption)
+
+        # ip addresses:
+        caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
+
+        # article ids:
+        caption = re.sub(r"\d:\d\d\s+$", "", caption)
+
+        # \n
+        caption = re.sub(r"\\n", " ", caption)
+
+        # "#123"
+        caption = re.sub(r"#\d{1,3}\b", "", caption)
+        # "#12345.."
+        caption = re.sub(r"#\d{5,}\b", "", caption)
+        # "123456.."
+        caption = re.sub(r"\b\d{6,}\b", "", caption)
+        # filenames:
+        caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
+
+        #
+        caption = re.sub(r"[\"\']{2,}", r'"', caption)  # """AUSVERKAUFT"""
+        caption = re.sub(r"[\.]{2,}", r" ", caption)  # """AUSVERKAUFT"""
+
+        caption = re.sub(self.bad_punct_regex, r" ", caption)  # ***AUSVERKAUFT***, #AUSVERKAUFT
+        caption = re.sub(r"\s+\.\s+", r" ", caption)  # " . "
+
+        # this-is-my-cute-cat / this_is_my_cute_cat
+        regex2 = re.compile(r"(?:\-|\_)")
+        if len(re.findall(regex2, caption)) > 3:
+            caption = re.sub(regex2, " ", caption)
+
+        caption = self.basic_clean(caption)
+
+        caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption)  # jc6640
+        caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption)  # jc6640vc
+        caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption)  # 6640vc231
+
+        caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
+        caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
+        caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
+        caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
+        caption = re.sub(r"\bpage\s+\d+\b", "", caption)
+
+        caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption)  # j2d1a2a...
+
+        caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
+
+        caption = re.sub(r"\b\s+\:\s+", r": ", caption)
+        caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
+        caption = re.sub(r"\s+", " ", caption)
+
+        caption = caption.strip()
+
+        caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
+        caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
+        caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
+        caption = re.sub(r"^\.\S+$", "", caption)
+
+        return caption.strip()
+
+
+@MODELS.register_module("t5")
+class T5Encoder:
+    def __init__(
+        self,
+        from_pretrained=None,
+        model_max_length=120,
+        device="cuda",
+        dtype=torch.float,
+        local_cache=True,
+        shardformer=False,
+    ):
+        assert from_pretrained is not None, "Please specify the path to the T5 model"
+
+        self.t5 = T5Embedder(
+            device=device,
+            torch_dtype=dtype,
+            local_cache=local_cache,
+            cache_dir=from_pretrained,
+            model_max_length=model_max_length,
+        )
+        self.t5.model.to(dtype=dtype)
+        self.y_embedder = None
+
+        self.model_max_length = model_max_length
+        self.output_dim = self.t5.model.config.d_model
+
+        if shardformer:
+            self.shardformer_t5()
+
+    def shardformer_t5(self):
+        from colossalai.shardformer import ShardConfig, ShardFormer
+
+        from opensora.acceleration.shardformer.policy.t5_encoder import T5EncoderPolicy
+        from opensora.utils.misc import requires_grad
+
+        shard_config = ShardConfig(
+            tensor_parallel_process_group=None,
+            pipeline_stage_manager=None,
+            enable_tensor_parallelism=False,
+            enable_fused_normalization=False,
+            enable_flash_attention=False,
+            enable_jit_fused=True,
+            enable_sequence_parallelism=False,
+            enable_sequence_overlap=False,
+        )
+        shard_former = ShardFormer(shard_config=shard_config)
+        optim_model, _ = shard_former.optimize(self.t5.model, policy=T5EncoderPolicy())
+        self.t5.model = optim_model.half()
+
+        # ensure the weights are frozen
+        requires_grad(self.t5.model, False)
+
+    def encode(self, text):
+        caption_embs, emb_masks = self.t5.get_text_embeddings(text)
+        caption_embs = caption_embs[:, None]
+        return dict(y=caption_embs, mask=emb_masks)
+
+    def null(self, n):
+        null_y = self.y_embedder.y_embedding[None].repeat(n, 1, 1)[:, None]
+        return null_y
diff --git 
a/opensora/models/vae/__init__.py b/opensora/models/vae/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63510b08b2036160c01d38b0ad3484757f6bcff7 --- /dev/null +++ b/opensora/models/vae/__init__.py @@ -0,0 +1 @@ +from .vae import VideoAutoencoderKL, VideoAutoencoderKLTemporalDecoder diff --git a/opensora/models/vae/vae.py b/opensora/models/vae/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..363bbfed43f8b191762c5d78138159ce6eb4c4a1 --- /dev/null +++ b/opensora/models/vae/vae.py @@ -0,0 +1,82 @@ +import torch +import torch.nn as nn +from diffusers.models import AutoencoderKL, AutoencoderKLTemporalDecoder +from einops import rearrange + +from opensora.registry import MODELS + + +@MODELS.register_module() +class VideoAutoencoderKL(nn.Module): + def __init__(self, from_pretrained=None, micro_batch_size=None): + super().__init__() + self.module = AutoencoderKL.from_pretrained(from_pretrained) + self.out_channels = self.module.config.latent_channels + self.patch_size = (1, 8, 8) + self.micro_batch_size = micro_batch_size + + def encode(self, x): + # x: (B, C, T, H, W) + B = x.shape[0] + x = rearrange(x, "B C T H W -> (B T) C H W") + + if self.micro_batch_size is None: + x = self.module.encode(x).latent_dist.sample().mul_(0.18215) + else: + bs = self.micro_batch_size + x_out = [] + for i in range(0, x.shape[0], bs): + x_bs = x[i : i + bs] + x_bs = self.module.encode(x_bs).latent_dist.sample().mul_(0.18215) + x_out.append(x_bs) + x = torch.cat(x_out, dim=0) + x = rearrange(x, "(B T) C H W -> B C T H W", B=B) + return x + + def decode(self, x): + # x: (B, C, T, H, W) + B = x.shape[0] + x = rearrange(x, "B C T H W -> (B T) C H W") + if self.micro_batch_size is None: + x = self.module.decode(x / 0.18215).sample + else: + bs = self.micro_batch_size + x_out = [] + for i in range(0, x.shape[0], bs): + x_bs = x[i : i + bs] + x_bs = self.module.decode(x_bs / 0.18215).sample + x_out.append(x_bs) + x = torch.cat(x_out, dim=0) + x = rearrange(x, "(B T) C H W -> B C T H W", B=B) + return x + + def get_latent_size(self, input_size): + for i in range(3): + assert input_size[i] % self.patch_size[i] == 0, "Input size must be divisible by patch size" + input_size = [input_size[i] // self.patch_size[i] for i in range(3)] + return input_size + + +@MODELS.register_module() +class VideoAutoencoderKLTemporalDecoder(nn.Module): + def __init__(self, from_pretrained=None): + super().__init__() + self.module = AutoencoderKLTemporalDecoder.from_pretrained(from_pretrained) + self.out_channels = self.module.config.latent_channels + self.patch_size = (1, 8, 8) + + def encode(self, x): + raise NotImplementedError + + def decode(self, x): + B, _, T = x.shape[:3] + x = rearrange(x, "B C T H W -> (B T) C H W") + x = self.module.decode(x / 0.18215, num_frames=T).sample + x = rearrange(x, "(B T) C H W -> B C T H W", B=B) + return x + + def get_latent_size(self, input_size): + for i in range(3): + assert input_size[i] % self.patch_size[i] == 0, "Input size must be divisible by patch size" + input_size = [input_size[i] // self.patch_size[i] for i in range(3)] + return input_size diff --git a/opensora/registry.py b/opensora/registry.py new file mode 100644 index 0000000000000000000000000000000000000000..7797d36bd76fd482766562d030948ddb85c17aef --- /dev/null +++ b/opensora/registry.py @@ -0,0 +1,39 @@ +from copy import deepcopy + +import torch.nn as nn +from mmengine.registry import Registry + + +def build_module(module, builder, **kwargs): + """Build module from config or 
return the module itself.
+
+    Args:
+        module (Union[dict, nn.Module]): The module to build.
+        builder (Registry): The registry used to build the module.
+        **kwargs: Keyword arguments that override the corresponding config entries.
+
+    Returns:
+        Any: The built module.
+    """
+    if isinstance(module, dict):
+        cfg = deepcopy(module)
+        for k, v in kwargs.items():
+            cfg[k] = v
+        return builder.build(cfg)
+    elif isinstance(module, nn.Module):
+        return module
+    elif module is None:
+        return None
+    else:
+        raise TypeError(f"Only dict and nn.Module are supported, but got {type(module)}.")
+
+
+MODELS = Registry(
+    "model",
+    locations=["opensora.models"],
+)
+
+SCHEDULERS = Registry(
+    "scheduler",
+    locations=["opensora.schedulers"],
+)
diff --git a/opensora/schedulers/__init__.py b/opensora/schedulers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..97ea76f92f8b99664e35c51172e35d66d704edc4
--- /dev/null
+++ b/opensora/schedulers/__init__.py
@@ -0,0 +1,2 @@
+from .dpms import DPMS
+from .iddpm import IDDPM
diff --git a/opensora/schedulers/dpms/__init__.py b/opensora/schedulers/dpms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0cebbcd21fe591ace55857ff5cb64f82293335e
--- /dev/null
+++ b/opensora/schedulers/dpms/__init__.py
@@ -0,0 +1,50 @@
+from functools import partial
+
+import torch
+
+from opensora.registry import SCHEDULERS
+
+from .dpm_solver import DPMS
+
+
+@SCHEDULERS.register_module("dpm-solver")
+class DPM_SOLVER:
+    def __init__(self, num_sampling_steps=None, cfg_scale=4.0):
+        self.num_sampling_steps = num_sampling_steps
+        self.cfg_scale = cfg_scale
+
+    def sample(
+        self,
+        model,
+        text_encoder,
+        z_size,
+        prompts,
+        device,
+        additional_args=None,
+    ):
+        n = len(prompts)
+        z = torch.randn(n, *z_size, device=device)
+        model_args = text_encoder.encode(prompts)
+        y = model_args.pop("y")
+        null_y = text_encoder.null(n)
+        if additional_args is not None:
+            model_args.update(additional_args)
+
+        dpms = DPMS(
+            partial(forward_with_dpmsolver, model),
+            condition=y,
+            uncondition=null_y,
+            cfg_scale=self.cfg_scale,
+            model_kwargs=model_args,
+        )
+        samples = dpms.sample(z, steps=self.num_sampling_steps, order=2, skip_type="time_uniform", method="multistep")
+        return samples
+
+
+def forward_with_dpmsolver(self, x, timestep, y, **kwargs):
+    """
+    DPM-Solver does not need variance prediction, so keep only the predicted mean
+    (the first half of the model's output channels).
+    """
+    # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb
+    model_out = self.forward(x, timestep, y, **kwargs)
+    return model_out.chunk(2, dim=1)[0]
diff --git a/opensora/schedulers/dpms/dpm_solver.py b/opensora/schedulers/dpms/dpm_solver.py
new file mode 100644
index 0000000000000000000000000000000000000000..106e59ec9c2a22de935210ecfd8153bcf7ebb551
--- /dev/null
+++ b/opensora/schedulers/dpms/dpm_solver.py
@@ -0,0 +1,1570 @@
+# MIT License
+#
+# Copyright (c) 2022 Cheng Lu
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+#
+# This file is adapted from the dpm-solver project
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# -------------------------------------------------------- +# References: +# PixArt: https://github.com/PixArt-alpha/PixArt-alpha +# dpm-solver: https://github.com/LuChengTHU/dpm-solver +# -------------------------------------------------------- + +import math + +import numpy as np +import torch +from tqdm import tqdm + + +def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac): + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + warmup_time = int(num_diffusion_timesteps * warmup_frac) + betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64) + return betas + + +def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps): + """ + This is the deprecated API for creating beta schedules. + See get_named_beta_schedule() for the new library of schedules. + """ + if beta_schedule == "quad": + betas = ( + np.linspace( + beta_start**0.5, + beta_end**0.5, + num_diffusion_timesteps, + dtype=np.float64, + ) + ** 2 + ) + elif beta_schedule == "linear": + betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "warmup10": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1) + elif beta_schedule == "warmup50": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5) + elif beta_schedule == "const": + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1 + betas = 1.0 / np.linspace(num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64) + else: + raise NotImplementedError(beta_schedule) + assert betas.shape == (num_diffusion_timesteps,) + return betas + + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): + """ + Get a pre-defined beta schedule for the given name. + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. + scale = 1000 / num_diffusion_timesteps + return get_beta_schedule( + "linear", + beta_start=scale * 0.0001, + beta_end=scale * 0.02, + num_diffusion_timesteps=num_diffusion_timesteps, + ) + elif schedule_name == "squaredcos_cap_v2": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
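+
+    Example (this is exactly the call used by the "squaredcos_cap_v2" schedule above):
+
+        >>> betas = betas_for_alpha_bar(
+        ...     1000, lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
+        ... )
+        >>> betas.shape
+        (1000,)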
+    """
+    betas = []
+    for i in range(num_diffusion_timesteps):
+        t1 = i / num_diffusion_timesteps
+        t2 = (i + 1) / num_diffusion_timesteps
+        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+    return np.array(betas)
+
+
+class NoiseScheduleVP:
+    def __init__(
+        self,
+        schedule="discrete",
+        betas=None,
+        alphas_cumprod=None,
+        continuous_beta_0=0.1,
+        continuous_beta_1=20.0,
+        dtype=torch.float32,
+    ):
+        """Create a wrapper class for the forward SDE (VP type).
+
+        ***
+        Update: We support discrete-time diffusion models by implementing a piecewise linear interpolation for log_alpha_t.
+        We recommend using schedule='discrete' for discrete-time diffusion models, especially for high-resolution images.
+        ***
+
+        The forward SDE ensures that the conditional distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).
+        We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).
+        Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:
+
+            log_alpha_t = self.marginal_log_mean_coeff(t)
+            sigma_t = self.marginal_std(t)
+            lambda_t = self.marginal_lambda(t)
+
+        Moreover, as lambda(t) is an invertible function, we also support its inverse function:
+
+            t = self.inverse_lambda(lambda_t)
+
+        ===============================================================
+
+        We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).
+
+        1. For discrete-time DPMs:
+
+            For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:
+                t_i = (i + 1) / N
+            e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.
+            We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.
+
+            Args:
+                betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)
+                alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)
+
+            Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.
+
+            **Important**: Please pay special attention to the args for `alphas_cumprod`:
+                The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that
+                    q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ).
+                Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have
+                    alpha_{t_n} = \sqrt{\hat{alpha_n}},
+                and
+                    log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}).
+
+
+        2. For continuous-time DPMs:
+
+            We support the linear VPSDE for the continuous time setting. The hyperparameters for the noise
+            schedule are the default settings in Yang Song's ScoreSDE:
+
+            Args:
+                beta_min: A `float` number. The smallest beta for the linear schedule.
+                beta_max: A `float` number. The largest beta for the linear schedule.
+                T: A `float` number. The ending time of the forward process.
+
+        ===============================================================
+
+        Args:
+            schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,
+                    'linear' for continuous-time DPMs.
+        Returns:
+            A wrapper object of the forward SDE (VP type).
+
+        ===============================================================
+
+        Example:
+
+        # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):
+        >>> ns = NoiseScheduleVP('discrete', betas=betas)
+
+        # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1):
+        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+
+        # For continuous-time DPMs (VPSDE), linear schedule:
+        >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)
+
+        """
+
+        if schedule not in ["discrete", "linear"]:
+            raise ValueError(f"Unsupported noise schedule {schedule}. The schedule needs to be 'discrete' or 'linear'")
+
+        self.schedule = schedule
+        if schedule == "discrete":
+            if betas is not None:
+                log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)
+            else:
+                assert alphas_cumprod is not None
+                log_alphas = 0.5 * torch.log(alphas_cumprod)
+            self.T = 1.0
+            self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1)).to(dtype=dtype)
+            self.total_N = self.log_alpha_array.shape[1]
+            self.t_array = torch.linspace(0.0, 1.0, self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)
+        else:
+            self.T = 1.0
+            self.total_N = 1000
+            self.beta_0 = continuous_beta_0
+            self.beta_1 = continuous_beta_1
+
+    def numerical_clip_alpha(self, log_alphas, clipped_lambda=-5.1):
+        """
+        For some beta schedules such as the cosine schedule, the log-SNR has numerical issues.
+        We clip the log-SNR near t=T within -5.1 to ensure stability.
+        Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.
+        """
+        log_sigmas = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_alphas))
+        lambs = log_alphas - log_sigmas
+        idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)
+        if idx > 0:
+            log_alphas = log_alphas[:-idx]
+        return log_alphas
+
+    def marginal_log_mean_coeff(self, t):
+        """
+        Compute log(alpha_t) of a given continuous-time label t in [0, T].
+        """
+        if self.schedule == "discrete":
+            return interpolate_fn(
+                t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)
+            ).reshape((-1))
+        elif self.schedule == "linear":
+            return -0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0
+
+    def marginal_alpha(self, t):
+        """
+        Compute alpha_t of a given continuous-time label t in [0, T].
+        """
+        return torch.exp(self.marginal_log_mean_coeff(t))
+
+    def marginal_std(self, t):
+        """
+        Compute sigma_t of a given continuous-time label t in [0, T].
+        """
+        return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t)))
+
+    def marginal_lambda(self, t):
+        """
+        Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].
+        """
+        log_mean_coeff = self.marginal_log_mean_coeff(t)
+        log_std = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_mean_coeff))
+        return log_mean_coeff - log_std
+
+    def inverse_lambda(self, lamb):
+        """
+        Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.
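+
+        For the linear schedule this solves the quadratic
+        (beta_1 - beta_0) * t**2 / 2 + beta_0 * t = log(1 + exp(-2 * lambda_t))
+        for t in closed form; for the discrete schedule it interpolates the
+        tabulated (lambda, t) pairs built from `log_alpha_array` and `t_array`.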
+        """
+        if self.schedule == "linear":
+            tmp = 2.0 * (self.beta_1 - self.beta_0) * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))
+            Delta = self.beta_0**2 + tmp
+            return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)
+        elif self.schedule == "discrete":
+            log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2.0 * lamb)
+            t = interpolate_fn(
+                log_alpha.reshape((-1, 1)),
+                torch.flip(self.log_alpha_array.to(lamb.device), [1]),
+                torch.flip(self.t_array.to(lamb.device), [1]),
+            )
+            return t.reshape((-1,))
+
+
+def model_wrapper(
+    model,
+    noise_schedule,
+    model_type="noise",
+    model_kwargs={},
+    guidance_type="uncond",
+    condition=None,
+    unconditional_condition=None,
+    guidance_scale=1.0,
+    classifier_fn=None,
+    classifier_kwargs={},
+):
+    """Create a wrapper function for the noise prediction model.
+
+    DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to
+    first wrap the model function into a noise prediction model that accepts the continuous time as the input.
+
+    We support four types of diffusion model by setting `model_type`:
+
+    1. "noise": noise prediction model. (Trained by predicting noise).
+
+    2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0).
+
+    3. "v": velocity prediction model. (Trained by predicting the velocity).
+        The "v" prediction is derived in detail in Appendix D of [1], and is used in Imagen-Video [2].
+
+        [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models."
+            arXiv preprint arXiv:2202.00512 (2022).
+        [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models."
+            arXiv preprint arXiv:2210.02303 (2022).
+
+    4. "score": marginal score function. (Trained by denoising score matching).
+        Note that the score function and the noise prediction model follow a simple relationship:
+        ```
+            noise(x_t, t) = -sigma_t * score(x_t, t)
+        ```
+
+    We support three types of guided sampling by DPMs by setting `guidance_type`:
+    1. "uncond": unconditional sampling by DPMs.
+        The input `model` has the following format:
+        ``
+            model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+        ``
+
+    2. "classifier": classifier guidance sampling [3] by DPMs and another classifier.
+        The input `model` has the following format:
+        ``
+            model(x, t_input, **model_kwargs) -> noise | x_start | v | score
+        ``
+
+        The input `classifier_fn` has the following format:
+        ``
+            classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)
+        ``
+
+        [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis,"
+            in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.
+
+    3. "classifier-free": classifier-free guidance sampling by conditional DPMs.
+        The input `model` has the following format:
+        ``
+            model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score
+        ``
+        And if cond == `unconditional_condition`, the model output is the unconditional DPM output.
+
+        [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance."
+            arXiv preprint arXiv:2207.12598 (2022).
+
+
+    The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)
+    or continuous-time labels (i.e. epsilon to T).
+
+    We wrap the model function to accept only `x` and `t_continuous` as inputs, and output the predicted noise:
+        ``
+        def model_fn(x, t_continuous) -> noise:
+            t_input = get_model_input_time(t_continuous)
+            return noise_pred(model, x, t_input, **model_kwargs)
+        ``
+    where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.
+
+    ===============================================================
+
+    Args:
+        model: A diffusion model with the corresponding format described above.
+        noise_schedule: A noise schedule object, such as NoiseScheduleVP.
+        model_type: A `str`. The parameterization type of the diffusion model.
+                    "noise" or "x_start" or "v" or "score".
+        model_kwargs: A `dict`. A dict for the other inputs of the model function.
+        guidance_type: A `str`. The type of the guidance for sampling.
+                    "uncond" or "classifier" or "classifier-free".
+        condition: A pytorch tensor. The condition for the guided sampling.
+                    Only used for "classifier" or "classifier-free" guidance type.
+        unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.
+                    Only used for "classifier-free" guidance type.
+        guidance_scale: A `float`. The scale for the guided sampling.
+        classifier_fn: A classifier function. Only used for the classifier guidance.
+        classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.
+    Returns:
+        A noise prediction model that accepts the noised data and the continuous time as the inputs.
+    """
+
+    def get_model_input_time(t_continuous):
+        """
+        Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.
+        For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].
+        For continuous-time DPMs, we just use `t_continuous`.
+        """
+        if noise_schedule.schedule == "discrete":
+            return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0
+        else:
+            return t_continuous
+
+    def noise_pred_fn(x, t_continuous, cond=None):
+        t_input = get_model_input_time(t_continuous)
+        if cond is None:
+            output = model(x, t_input, **model_kwargs)
+        else:
+            output = model(x, t_input, cond, **model_kwargs)
+        if model_type == "noise":
+            return output
+        elif model_type == "x_start":
+            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+            return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())
+        elif model_type == "v":
+            alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)
+            return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x
+        elif model_type == "score":
+            sigma_t = noise_schedule.marginal_std(t_continuous)
+            return -expand_dims(sigma_t, x.dim()) * output
+
+    def cond_grad_fn(x, t_input):
+        """
+        Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).
+        """
+        with torch.enable_grad():
+            x_in = x.detach().requires_grad_(True)
+            log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)
+            return torch.autograd.grad(log_prob.sum(), x_in)[0]
+
+    def model_fn(x, t_continuous):
+        """
+        The noise prediction model function that is used for DPM-Solver.
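+
+        For "classifier-free" guidance this batches the conditional and the
+        unconditional branch into one model call and returns
+        noise_uncond + guidance_scale * (noise_cond - noise_uncond).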
+ """ + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1.0 or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v", "score"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__( + self, + model_fn, + noise_schedule, + algorithm_type="dpmsolver++", + correcting_x0_fn=None, + correcting_xt_fn=None, + thresholding_max_val=1.0, + dynamic_thresholding_ratio=0.995, + ): + """Construct a DPM-Solver. + + We support both DPM-Solver (`algorithm_type="dpmsolver"`) and DPM-Solver++ (`algorithm_type="dpmsolver++"`). + + We also support the "dynamic thresholding" method in Imagen[1]. For pixel-space diffusion models, you + can set both `algorithm_type="dpmsolver++"` and `correcting_x0_fn="dynamic_thresholding"` to use the + dynamic thresholding. The "dynamic thresholding" can greatly improve the sample quality for pixel-space + DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space + DPMs (such as stable-diffusion). + + To support advanced algorithms in image-to-image applications, we also support corrector functions for + both x0 and xt. + + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + algorithm_type: A `str`. Either "dpmsolver" or "dpmsolver++". + correcting_x0_fn: A `str` or a function with the following format: + ``` + def correcting_x0_fn(x0, t): + x0_new = ... + return x0_new + ``` + This function is to correct the outputs of the data prediction model at each sampling step. e.g., + ``` + x0_pred = data_pred_model(xt, t) + if correcting_x0_fn is not None: + x0_pred = correcting_x0_fn(x0_pred, t) + xt_1 = update(x0_pred, xt, t) + ``` + If `correcting_x0_fn="dynamic_thresholding"`, we use the dynamic thresholding proposed in Imagen[1]. + correcting_xt_fn: A function with the following format: + ``` + def correcting_xt_fn(xt, t, step): + x_new = ... + return x_new + ``` + This function is to correct the intermediate samples xt at each sampling step. e.g., + ``` + xt = ... + xt = correcting_xt_fn(xt, t, step) + ``` + thresholding_max_val: A `float`. The max value for thresholding. + Valid only when use `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`. + dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details). + Valid only when use `dpmsolver++` and `correcting_x0_fn="dynamic_thresholding"`. 
+ + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, + Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models + with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = lambda x, t: model_fn(x, t.expand((x.shape[0]))) + self.noise_schedule = noise_schedule + assert algorithm_type in ["dpmsolver", "dpmsolver++"] + self.algorithm_type = algorithm_type + if correcting_x0_fn == "dynamic_thresholding": + self.correcting_x0_fn = self.dynamic_thresholding_fn + else: + self.correcting_x0_fn = correcting_x0_fn + self.correcting_xt_fn = correcting_xt_fn + self.dynamic_thresholding_ratio = dynamic_thresholding_ratio + self.thresholding_max_val = thresholding_max_val + + def dynamic_thresholding_fn(self, x0, t): + """ + The dynamic thresholding method. + """ + dims = x0.dim() + p = self.dynamic_thresholding_ratio + s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with corrector). + """ + noise = self.noise_prediction_fn(x, t) + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - sigma_t * noise) / alpha_t + if self.correcting_x0_fn is not None: + x0 = self.correcting_x0_fn(x0, t) + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.algorithm_type == "dpmsolver++": + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). 
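+
+        Illustrative call (hypothetical values):
+        ```
+        ts = self.get_time_steps("time_uniform", t_T=1.0, t_0=1e-3, N=10, device=device)
+        # ts has shape (11,) and runs from 1.0 down to 1e-3: the endpoints of 10 solver steps.
+        ```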
+ """ + if skip_type == "logSNR": + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == "time_uniform": + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == "time_quadratic": + t_order = 2 + return torch.linspace(t_T ** (1.0 / t_order), t_0 ** (1.0 / t_order), N + 1).pow(t_order).to(device) + else: + raise ValueError( + f"Unsupported skip_type {skip_type}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'" + ) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. 
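+
+        Worked example: for `steps=20` and `order=3`, K = 20 // 3 + 1 = 7 and
+        steps % 3 == 2, so `orders == [3, 3, 3, 3, 3, 3, 2]`, which spends
+        6 * 3 + 2 = 20 function evaluations in total.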
+        """
+        if order == 3:
+            K = steps // 3 + 1
+            if steps % 3 == 0:
+                orders = [3] * (K - 2) + [2, 1]
+            elif steps % 3 == 1:
+                orders = [3] * (K - 1) + [1]
+            else:
+                orders = [3] * (K - 1) + [2]
+        elif order == 2:
+            if steps % 2 == 0:
+                K = steps // 2
+                orders = [2] * K
+            else:
+                K = steps // 2 + 1
+                orders = [2] * (K - 1) + [1]
+        elif order == 1:
+            K = 1
+            orders = [1] * steps
+        else:
+            raise ValueError("'order' must be '1' or '2' or '3'.")
+        if skip_type == "logSNR":
+            # To reproduce the results in DPM-Solver paper
+            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)
+        else:
+            timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[
+                torch.cumsum(torch.tensor([0] + orders), 0).to(device)
+            ]
+        return timesteps_outer, orders
+
+    def denoise_to_zero_fn(self, x, s):
+        """
+        Denoise at the final step, which is equivalent to solving the ODE from lambda_s to infty by first-order discretization.
+        """
+        return self.data_prediction_fn(x, s)
+
+    def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):
+        """
+        DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.
+
+        Args:
+            x: A pytorch tensor. The initial value at time `s`.
+            s: A pytorch tensor. The starting time, with the shape (1,).
+            t: A pytorch tensor. The ending time, with the shape (1,).
+            model_s: A pytorch tensor. The model function evaluated at time `s`.
+                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
+            return_intermediate: A `bool`. If true, also return the model value at time `s`.
+        Returns:
+            x_t: A pytorch tensor. The approximated solution at time `t`.
+        """
+        ns = self.noise_schedule
+        lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)
+        h = lambda_t - lambda_s
+        log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)
+        sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)
+        alpha_t = torch.exp(log_alpha_t)
+
+        if self.algorithm_type == "dpmsolver++":
+            phi_1 = torch.expm1(-h)
+            if model_s is None:
+                model_s = self.model_fn(x, s)
+            x_t = sigma_t / sigma_s * x - alpha_t * phi_1 * model_s
+        else:
+            phi_1 = torch.expm1(h)
+            if model_s is None:
+                model_s = self.model_fn(x, s)
+            x_t = torch.exp(log_alpha_t - log_alpha_s) * x - (sigma_t * phi_1) * model_s
+        return (x_t, {"model_s": model_s}) if return_intermediate else x_t
+
+    def singlestep_dpm_solver_second_update(
+        self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type="dpmsolver"
+    ):
+        """
+        Singlestep solver DPM-Solver-2 from time `s` to time `t`.
+
+        Args:
+            x: A pytorch tensor. The initial value at time `s`.
+            s: A pytorch tensor. The starting time, with the shape (1,).
+            t: A pytorch tensor. The ending time, with the shape (1,).
+            r1: A `float`. The hyperparameter of the second-order solver.
+            model_s: A pytorch tensor. The model function evaluated at time `s`.
+                If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.
+            return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).
+            solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.
+                The type slightly impacts the performance. We recommend to use 'dpmsolver' type.
+        Returns:
+            x_t: A pytorch tensor. The approximated solution at time `t`.
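+
+        Illustrative call (hypothetical `solver`, `x`, `s`, `t` shaped as described
+        above); `r1=0.5` places the intermediate evaluation time halfway between
+        lambda_s and lambda_t in logSNR:
+        ```
+        x_t = solver.singlestep_dpm_solver_second_update(x, s, t, r1=0.5)
+        ```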
+ """ + if solver_type not in ["dpmsolver", "taylor"]: + raise ValueError(f"'solver_type' must be either 'dpmsolver' or 'taylor', got {solver_type}") + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ( + ns.marginal_log_mean_coeff(s), + ns.marginal_log_mean_coeff(s1), + ns.marginal_log_mean_coeff(t), + ) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.algorithm_type == "dpmsolver++": + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s + model_s1 = self.model_fn(x_s1, s1) + if solver_type == "dpmsolver": + x_t = ( + (sigma_t / sigma_s) * x + - (alpha_t * phi_1) * model_s + - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s) + ) + elif solver_type == "taylor": + x_t = ( + (sigma_t / sigma_s) * x + - (alpha_t * phi_1) * model_s + + (1.0 / r1) * (alpha_t * (phi_1 / h + 1.0)) * (model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = torch.exp(log_alpha_s1 - log_alpha_s) * x - (sigma_s1 * phi_11) * model_s + model_s1 = self.model_fn(x_s1, s1) + if solver_type == "dpmsolver": + x_t = ( + torch.exp(log_alpha_t - log_alpha_s) * x + - (sigma_t * phi_1) * model_s + - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s) + ) + elif solver_type == "taylor": + x_t = ( + torch.exp(log_alpha_t - log_alpha_s) * x + - (sigma_t * phi_1) * model_s + - (1.0 / r1) * (sigma_t * (phi_1 / h - 1.0)) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {"model_s": model_s, "model_s1": model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update( + self, + x, + s, + t, + r1=1.0 / 3.0, + r2=2.0 / 3.0, + model_s=None, + model_s1=None, + return_intermediate=False, + solver_type="dpmsolver", + ): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (1,). + t: A pytorch tensor. The ending time, with the shape (1,). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). + If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
+ """ + if solver_type not in ["dpmsolver", "taylor"]: + raise ValueError(f"'solver_type' must be either 'dpmsolver' or 'taylor', got {solver_type}") + if r1 is None: + r1 = 1.0 / 3.0 + if r2 is None: + r2 = 2.0 / 3.0 + ns = self.noise_schedule + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ( + ns.marginal_log_mean_coeff(s), + ns.marginal_log_mean_coeff(s1), + ns.marginal_log_mean_coeff(s2), + ns.marginal_log_mean_coeff(t), + ) + sigma_s, sigma_s1, sigma_s2, sigma_t = ( + ns.marginal_std(s), + ns.marginal_std(s1), + ns.marginal_std(s2), + ns.marginal_std(t), + ) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.algorithm_type == "dpmsolver++": + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.0 + phi_2 = phi_1 / h + 1.0 + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + (sigma_s2 / sigma_s) * x + - (alpha_s2 * phi_12) * model_s + + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == "dpmsolver": + x_t = ( + (sigma_t / sigma_s) * x + - (alpha_t * phi_1) * model_s + + (1.0 / r2) * (alpha_t * phi_2) * (model_s2 - model_s) + ) + elif solver_type == "taylor": + D1_0 = (1.0 / r1) * (model_s1 - model_s) + D1_1 = (1.0 / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + (sigma_t / sigma_s) * x + - (alpha_t * phi_1) * model_s + + (alpha_t * phi_2) * D1 + - (alpha_t * phi_3) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.0 + phi_2 = phi_1 / h - 1.0 + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = (torch.exp(log_alpha_s1 - log_alpha_s)) * x - (sigma_s1 * phi_11) * model_s + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + (torch.exp(log_alpha_s2 - log_alpha_s)) * x + - (sigma_s2 * phi_12) * model_s + - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == "dpmsolver": + x_t = ( + (torch.exp(log_alpha_t - log_alpha_s)) * x + - (sigma_t * phi_1) * model_s + - (1.0 / r2) * (sigma_t * phi_2) * (model_s2 - model_s) + ) + elif solver_type == "taylor": + D1_0 = (1.0 / r1) * (model_s1 - model_s) + D1_1 = (1.0 / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + (torch.exp(log_alpha_t - log_alpha_s)) * x + - (sigma_t * phi_1) * model_s + - (sigma_t * phi_2) * D1 + - (sigma_t * phi_3) * D2 + ) + + if return_intermediate: + return x_t, {"model_s": model_s, "model_s1": model_s1, "model_s2": model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpmsolver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. 
The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,) + t: A pytorch tensor. The ending time, with the shape (1,). + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ["dpmsolver", "taylor"]: + raise ValueError(f"'solver_type' must be either 'dpmsolver' or 'taylor', got {solver_type}") + ns = self.noise_schedule + model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1] + t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1] + lambda_prev_1, lambda_prev_0, lambda_t = ( + ns.marginal_lambda(t_prev_1), + ns.marginal_lambda(t_prev_0), + ns.marginal_lambda(t), + ) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1) + if self.algorithm_type == "dpmsolver++": + phi_1 = torch.expm1(-h) + if solver_type == "dpmsolver": + x_t = (sigma_t / sigma_prev_0) * x - (alpha_t * phi_1) * model_prev_0 - 0.5 * (alpha_t * phi_1) * D1_0 + elif solver_type == "taylor": + x_t = ( + (sigma_t / sigma_prev_0) * x + - (alpha_t * phi_1) * model_prev_0 + + (alpha_t * (phi_1 / h + 1.0)) * D1_0 + ) + else: + phi_1 = torch.expm1(h) + if solver_type == "dpmsolver": + x_t = ( + (torch.exp(log_alpha_t - log_alpha_prev_0)) * x + - (sigma_t * phi_1) * model_prev_0 + - 0.5 * (sigma_t * phi_1) * D1_0 + ) + elif solver_type == "taylor": + x_t = ( + (torch.exp(log_alpha_t - log_alpha_prev_0)) * x + - (sigma_t * phi_1) * model_prev_0 + - (sigma_t * (phi_1 / h - 1.0)) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpmsolver"): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,) + t: A pytorch tensor. The ending time, with the shape (1,). + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
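+
+        Note: callers keep `model_prev_list` and `t_prev_list` ordered from oldest to
+        newest; the `sample` method below maintains them along the lines of this sketch:
+        ```
+        x = solver.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t)
+        model_prev_list = model_prev_list[1:] + [solver.model_fn(x, t)]
+        t_prev_list = t_prev_list[1:] + [t]
+        ```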
+ """ + ns = self.noise_schedule + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ( + ns.marginal_lambda(t_prev_2), + ns.marginal_lambda(t_prev_1), + ns.marginal_lambda(t_prev_0), + ns.marginal_lambda(t), + ) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1) + D1_1 = (1.0 / r1) * (model_prev_1 - model_prev_2) + D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1) + D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1) + if self.algorithm_type == "dpmsolver++": + phi_1 = torch.expm1(-h) + phi_2 = phi_1 / h + 1.0 + phi_3 = phi_2 / h - 0.5 + return ( + (sigma_t / sigma_prev_0) * x + - (alpha_t * phi_1) * model_prev_0 + + (alpha_t * phi_2) * D1 + - (alpha_t * phi_3) * D2 + ) + else: + phi_1 = torch.expm1(h) + phi_2 = phi_1 / h - 1.0 + phi_3 = phi_2 / h - 0.5 + return ( + (torch.exp(log_alpha_t - log_alpha_prev_0)) * x + - (sigma_t * phi_1) * model_prev_0 + - (sigma_t * phi_2) * D1 + - (sigma_t * phi_3) * D2 + ) + + def singlestep_dpm_solver_update( + self, x, s, t, order, return_intermediate=False, solver_type="dpmsolver", r1=None, r2=None + ): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (1,). + t: A pytorch tensor. The ending time, with the shape (1,). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update( + x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1 + ) + elif order == 3: + return self.singlestep_dpm_solver_third_update( + x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2 + ) + else: + raise ValueError(f"Solver order must be 1 or 2 or 3, got {order}") + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type="dpmsolver"): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,) + t: A pytorch tensor. The ending time, with the shape (1,). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. 
+ The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError(f"Solver order must be 1 or 2 or 3, got {order}") + + def dpm_solver_adaptive( + self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type="dpmsolver" + ): + """ + The adaptive step size solver based on singlestep DPM-Solver. + + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpmsolver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. 
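+
+        Illustrative call (hypothetical initial noise `x` drawn at time `t_T`):
+        ```
+        x_0 = solver.dpm_solver_adaptive(x, order=3, t_T=1.0, t_0=1e-3)
+        ```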
+        """
+        ns = self.noise_schedule
+        s = t_T * torch.ones((1,)).to(x)
+        lambda_s = ns.marginal_lambda(s)
+        lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))
+        h = h_init * torch.ones_like(s).to(x)
+        x_prev = x
+        nfe = 0
+        if order == 2:
+            r1 = 0.5
+            lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)
+            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(
+                x, s, t, r1=r1, solver_type=solver_type, **kwargs
+            )
+        elif order == 3:
+            r1, r2 = 1.0 / 3.0, 2.0 / 3.0
+            lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(
+                x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type
+            )
+            higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(
+                x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs
+            )
+        else:
+            raise ValueError(f"For adaptive step size solver, order must be 2 or 3, got {order}")
+        while torch.abs((s - t_0)).mean() > t_err:
+            t = ns.inverse_lambda(lambda_s + h)
+            x_lower, lower_noise_kwargs = lower_update(x, s, t)
+            x_higher = higher_update(x, s, t, **lower_noise_kwargs)
+            delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))
+            norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))
+            E = norm_fn((x_higher - x_lower) / delta).max()
+            if torch.all(E <= 1.0):
+                x = x_higher
+                s = t
+                x_prev = x_lower
+                lambda_s = ns.marginal_lambda(s)
+            h = torch.min(theta * h * torch.float_power(E, -1.0 / order).float(), lambda_0 - lambda_s)
+            nfe += order
+        print("adaptive solver nfe", nfe)
+        return x
+
+    def add_noise(self, x, t, noise=None):
+        """
+        Compute the noised input xt = alpha_t * x + sigma_t * noise.
+
+        Args:
+            x: A `torch.Tensor` with shape `(batch_size, *shape)`.
+            t: A `torch.Tensor` with shape `(t_size,)`.
+            noise: A `torch.Tensor` with shape `(t_size, batch_size, *shape)`.
+                If None, it is sampled from a standard normal distribution.
+        Returns:
+            xt with shape `(t_size, batch_size, *shape)`.
+        """
+        alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)
+        if noise is None:
+            noise = torch.randn((t.shape[0], *x.shape), device=x.device)
+        x = x.reshape((-1, *x.shape))
+        xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise
+        return xt.squeeze(0) if t.shape[0] == 1 else xt
+
+    def inverse(
+        self,
+        x,
+        steps=20,
+        t_start=None,
+        t_end=None,
+        order=2,
+        skip_type="time_uniform",
+        method="multistep",
+        lower_order_final=True,
+        denoise_to_zero=False,
+        solver_type="dpmsolver",
+        atol=0.0078,
+        rtol=0.05,
+        return_intermediate=False,
+    ):
+        """
+        Invert the sample `x` from time `t_start` to `t_end` by DPM-Solver.
+        For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.
+        """
+        t_0 = 1.0 / self.noise_schedule.total_N if t_start is None else t_start
+        t_T = self.noise_schedule.T if t_end is None else t_end
+        assert (
+            t_0 > 0 and t_T > 0
+        ), "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array"
+        return self.sample(
+            x,
+            steps=steps,
+            t_start=t_0,
+            t_end=t_T,
+            order=order,
+            skip_type=skip_type,
+            method=method,
+            lower_order_final=lower_order_final,
+            denoise_to_zero=denoise_to_zero,
+            solver_type=solver_type,
+            atol=atol,
+            rtol=rtol,
+            return_intermediate=return_intermediate,
+        )
+
+    def sample(
+        self,
+        x,
+        steps=20,
+        t_start=None,
+        t_end=None,
+        order=2,
+        skip_type="time_uniform",
+        method="multistep",
+        lower_order_final=True,
+        denoise_to_zero=False,
+        solver_type="dpmsolver",
+        atol=0.0078,
+        rtol=0.05,
+        return_intermediate=False,
+    ):
+        """
+        Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.
+
+        =====================================================
+
+        We support the following algorithms for both noise prediction model and data prediction model:
+            - 'singlestep':
+                Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver.
+                We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).
+                The total number of function evaluations (NFE) == `steps`.
+                Given a fixed NFE == `steps`, the sampling procedure is:
+                    - If `order` == 1:
+                        - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).
+                    - If `order` == 2:
+                        - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.
+                        - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.
+                        - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
+                    - If `order` == 3:
+                        - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.
+                        - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.
+                        - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.
+                        - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.
+            - 'multistep':
+                Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.
+                We initialize the first `order` values by lower order multistep solvers.
+                Given a fixed NFE == `steps`, the sampling procedure is:
+                    Denote K = steps.
+                    - If `order` == 1:
+                        - We use K steps of DPM-Solver-1 (i.e. DDIM).
+                    - If `order` == 2:
+                        - We first use 1 step of DPM-Solver-1, then use (K - 1) steps of multistep DPM-Solver-2.
+                    - If `order` == 3:
+                        - We first use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) steps of multistep DPM-Solver-3.
+            - 'singlestep_fixed':
+                Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).
+                We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.
+            - 'adaptive':
+                Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper).
+                We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.
+                You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computation costs
+                (NFE) and the sample quality.
+                    - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.
+                    - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.
+
+        =====================================================
+
+        Some advice for choosing the algorithm:
+            - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:
+                Use singlestep DPM-Solver or DPM-Solver++ ("DPM-Solver-fast" in the paper) with `order = 3`.
+                e.g., DPM-Solver:
+                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver")
+                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
+                            skip_type='time_uniform', method='singlestep')
+                e.g., DPM-Solver++:
+                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
+                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,
+                            skip_type='time_uniform', method='singlestep')
+            - For **guided sampling with large guidance scale** by DPMs:
+                Use multistep DPM-Solver with `algorithm_type="dpmsolver++"` and `order = 2`.
+                e.g.
+                    >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
+                    >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,
+                            skip_type='time_uniform', method='multistep')
+
+        We support three types of `skip_type`:
+            - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**.
+            - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.
+            - 'time_quadratic': quadratic time for the time steps.
+
+        =====================================================
+        Args:
+            x: A pytorch tensor. The initial value at time `t_start`.
+                e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.
+            steps: A `int`. The total number of function evaluations (NFE).
+            t_start: A `float`. The starting time of the sampling.
+                If `t_start` is None, we use self.noise_schedule.T (default is 1.0).
+            t_end: A `float`. The ending time of the sampling.
+                If `t_end` is None, we use 1. / self.noise_schedule.total_N.
+                e.g. if total_N == 1000, we have `t_end` == 1e-3.
+                For discrete-time DPMs:
+                    - We recommend `t_end` == 1. / self.noise_schedule.total_N.
+                For continuous-time DPMs:
+                    - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.
+            order: A `int`. The order of DPM-Solver.
+            skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.
+            method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.
+            denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.
+                Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).
+
+                This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and
+                score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID
+                for diffusion models sampling by diffusion SDEs for low-resolutional images
+                (such as CIFAR-10). However, we observed that such trick does not matter for
+                high-resolutional images. As it needs an additional NFE, we do not recommend
+                it for high-resolutional images.
+            lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.
+                Only valid for `method=multistep` and `steps < 15`. We empirically find that
+                this trick is a key to stabilizing the sampling by DPM-Solver with very few steps
+                (especially for steps <= 10). So we recommend setting it to `True`.
+            solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.
+            atol: A `float`.
The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + return_intermediate: A `bool`. Whether to save the xt at each step. + When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + + """ + t_0 = 1.0 / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + assert ( + t_0 > 0 and t_T > 0 + ), "Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array" + if return_intermediate: + assert method in [ + "multistep", + "singlestep", + "singlestep_fixed", + ], "Cannot use adaptive solver when saving intermediate values" + if self.correcting_xt_fn is not None: + assert method in [ + "multistep", + "singlestep", + "singlestep_fixed", + ], "Cannot use adaptive solver when correcting_xt_fn is not None" + device = x.device + intermediates = [] + with torch.no_grad(): + if method == "adaptive": + x = self.dpm_solver_adaptive( + x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type + ) + elif method == "multistep": + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + # Init the initial values. + step = 0 + t = timesteps[step] + t_prev_list = [t] + model_prev_list = [self.model_fn(x, t)] + if self.correcting_xt_fn is not None: + x = self.correcting_xt_fn(x, t, step) + if return_intermediate: + intermediates.append(x) + # Init the first `order` values by lower order multistep DPM-Solver. + for step in range(1, order): + t = timesteps[step] + x = self.multistep_dpm_solver_update( + x, model_prev_list, t_prev_list, t, step, solver_type=solver_type + ) + if self.correcting_xt_fn is not None: + x = self.correcting_xt_fn(x, t, step) + if return_intermediate: + intermediates.append(x) + t_prev_list.append(t) + model_prev_list.append(self.model_fn(x, t)) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in tqdm(range(order, steps + 1)): + t = timesteps[step] + # We only use lower order for steps < 10 + if lower_order_final and steps < 10: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update( + x, model_prev_list, t_prev_list, t, step_order, solver_type=solver_type + ) + if self.correcting_xt_fn is not None: + x = self.correcting_xt_fn(x, t, step) + if return_intermediate: + intermediates.append(x) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = t + # We do not need to evaluate the final model value. 
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, t) + elif method in ["singlestep", "singlestep_fixed"]: + if method == "singlestep": + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver( + steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device + ) + elif method == "singlestep_fixed": + K = steps // order + orders = [ + order, + ] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for step, order in enumerate(orders): + s, t = timesteps_outer[step], timesteps_outer[step + 1] + timesteps_inner = self.get_time_steps( + skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order, device=device + ) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2) + if self.correcting_xt_fn is not None: + x = self.correcting_xt_fn(x, t, step) + if return_intermediate: + intermediates.append(x) + else: + raise ValueError(f"Got wrong method {method}") + if denoise_to_zero: + t = torch.ones((1,)).to(device) * t_0 + x = self.denoise_to_zero_fn(x, t) + if self.correcting_xt_fn is not None: + x = self.correcting_xt_fn(x, t, step + 1) + if return_intermediate: + intermediates.append(x) + return (x, intermediates) if return_intermediate else x + + +############################################################# +# other utility functions +############################################################# + + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. 
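+
+    Illustrative example (hypothetical keypoints, C = 1 as used by DPM-Solver):
+    ```
+    xp = torch.tensor([[0.0, 1.0, 2.0]])    # [C, K]
+    yp = torch.tensor([[0.0, 10.0, 20.0]])  # [C, K]
+    x = torch.tensor([[0.5], [1.5]])        # [N, C]
+    interpolate_fn(x, xp, yp)               # tensor([[ 5.], [15.]])
+    ```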
+    """
+    N, K = x.shape[0], xp.shape[1]
+    all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2)
+    sorted_all_x, x_indices = torch.sort(all_x, dim=2)
+    x_idx = torch.argmin(x_indices, dim=2)
+    cand_start_idx = x_idx - 1
+    start_idx = torch.where(
+        torch.eq(x_idx, 0),
+        torch.tensor(1, device=x.device),
+        torch.where(
+            torch.eq(x_idx, K),
+            torch.tensor(K - 2, device=x.device),
+            cand_start_idx,
+        ),
+    )
+    end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1)
+    start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2)
+    end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2)
+    start_idx2 = torch.where(
+        torch.eq(x_idx, 0),
+        torch.tensor(0, device=x.device),
+        torch.where(
+            torch.eq(x_idx, K),
+            torch.tensor(K - 2, device=x.device),
+            cand_start_idx,
+        ),
+    )
+    y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1)
+    start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2)
+    end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2)
+    return start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x)
+
+
+def expand_dims(v, dims):
+    """
+    Expand the tensor `v` to `dims` dimensions.
+
+    Args:
+        `v`: a PyTorch tensor with shape [N].
+        `dims`: an `int`. The target total number of dimensions.
+    Returns:
+        a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`.
+    """
+    return v[(...,) + (None,) * (dims - 1)]
+
+
+def DPMS(
+    model,
+    condition,
+    uncondition,
+    cfg_scale,
+    model_type="noise",
+    noise_schedule="linear",
+    guidance_type="classifier-free",
+    model_kwargs=None,
+    diffusion_steps=1000,
+):
+    if model_kwargs is None:
+        model_kwargs = {}
+    betas = torch.tensor(get_named_beta_schedule(noise_schedule, diffusion_steps))
+
+    ## 1. Define the noise schedule.
+    noise_schedule = NoiseScheduleVP(schedule="discrete", betas=betas)
+
+    ## 2. Convert your discrete-time `model` to the continuous-time
+    ## noise prediction model. Here is an example for a diffusion model
+    ## `model` with the noise prediction type ("noise").
+    model_fn = model_wrapper(
+        model,
+        noise_schedule,
+        model_type=model_type,
+        model_kwargs=model_kwargs,
+        guidance_type=guidance_type,
+        condition=condition,
+        unconditional_condition=uncondition,
+        guidance_scale=cfg_scale,
+    )
+    ## 3. Define dpm-solver and sample by multistep DPM-Solver.
+    return DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
diff --git a/opensora/schedulers/iddpm/__init__.py b/opensora/schedulers/iddpm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9806ad3d7a29fdd17c3c895f5dbc5647392ffd3
--- /dev/null
+++ b/opensora/schedulers/iddpm/__init__.py
@@ -0,0 +1,95 @@
+from functools import partial
+
+import torch
+
+from opensora.registry import SCHEDULERS
+
+from .
import gaussian_diffusion as gd +from .respace import SpacedDiffusion, space_timesteps + + +@SCHEDULERS.register_module("iddpm") +class IDDPM(SpacedDiffusion): + def __init__( + self, + num_sampling_steps=None, + timestep_respacing=None, + noise_schedule="linear", + use_kl=False, + sigma_small=False, + predict_xstart=False, + learn_sigma=True, + rescale_learned_sigmas=False, + diffusion_steps=1000, + cfg_scale=4.0, + ): + betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps) + if use_kl: + loss_type = gd.LossType.RESCALED_KL + elif rescale_learned_sigmas: + loss_type = gd.LossType.RESCALED_MSE + else: + loss_type = gd.LossType.MSE + if num_sampling_steps is not None: + assert timestep_respacing is None + timestep_respacing = str(num_sampling_steps) + if timestep_respacing is None or timestep_respacing == "": + timestep_respacing = [diffusion_steps] + super().__init__( + use_timesteps=space_timesteps(diffusion_steps, timestep_respacing), + betas=betas, + model_mean_type=(gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X), + model_var_type=( + (gd.ModelVarType.FIXED_LARGE if not sigma_small else gd.ModelVarType.FIXED_SMALL) + if not learn_sigma + else gd.ModelVarType.LEARNED_RANGE + ), + loss_type=loss_type, + # rescale_timesteps=rescale_timesteps, + ) + + self.cfg_scale = cfg_scale + + def sample( + self, + model, + text_encoder, + z_size, + prompts, + device, + additional_args=None, + ): + n = len(prompts) + z = torch.randn(n, *z_size, device=device) + z = torch.cat([z, z], 0) + model_args = text_encoder.encode(prompts) + y_null = text_encoder.null(n) + model_args["y"] = torch.cat([model_args["y"], y_null], 0) + if additional_args is not None: + model_args.update(additional_args) + + forward = partial(forward_with_cfg, model, cfg_scale=self.cfg_scale) + samples = self.p_sample_loop( + forward, + z.shape, + z, + clip_denoised=False, + model_kwargs=model_args, + progress=True, + device=device, + ) + samples, _ = samples.chunk(2, dim=0) + return samples + + +def forward_with_cfg(model, x, timestep, y, cfg_scale, **kwargs): + # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb + half = x[: len(x) // 2] + combined = torch.cat([half, half], dim=0) + model_out = model.forward(combined, timestep, y, **kwargs) + model_out = model_out["x"] if isinstance(model_out, dict) else model_out + eps, rest = model_out[:, :3], model_out[:, 3:] + cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0) + half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps) + eps = torch.cat([half_eps, half_eps], dim=0) + return torch.cat([eps, rest], dim=1) diff --git a/opensora/schedulers/iddpm/diffusion_utils.py b/opensora/schedulers/iddpm/diffusion_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c097ac59c6de771c4aeb8b9193aba48a4dfc7c7e --- /dev/null +++ b/opensora/schedulers/iddpm/diffusion_utils.py @@ -0,0 +1,87 @@ +# Adapted from DiT + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# -------------------------------------------------------- +# References: +# DiT: https://github.com/facebookresearch/DiT/tree/main +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# -------------------------------------------------------- + + +import numpy as np +import torch as th + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, th.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for th.exp(). + logvar1, logvar2 = [x if isinstance(x, th.Tensor) else th.tensor(x).to(tensor) for x in (logvar1, logvar2)] + + return 0.5 * (-1.0 + logvar2 - logvar1 + th.exp(logvar1 - logvar2) + ((mean1 - mean2) ** 2) * th.exp(-logvar2)) + + +def approx_standard_normal_cdf(x): + """ + A fast approximation of the cumulative distribution function of the + standard normal. + """ + return 0.5 * (1.0 + th.tanh(np.sqrt(2.0 / np.pi) * (x + 0.044715 * th.pow(x, 3)))) + + +def continuous_gaussian_log_likelihood(x, *, means, log_scales): + """ + Compute the log-likelihood of a continuous Gaussian distribution. + :param x: the targets + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). + """ + centered_x = x - means + inv_stdv = th.exp(-log_scales) + normalized_x = centered_x * inv_stdv + log_probs = th.distributions.Normal(th.zeros_like(x), th.ones_like(x)).log_prob(normalized_x) + return log_probs + + +def discretized_gaussian_log_likelihood(x, *, means, log_scales): + """ + Compute the log-likelihood of a Gaussian distribution discretizing to a + given image. + :param x: the target images. It is assumed that this was uint8 values, + rescaled to the range [-1, 1]. + :param means: the Gaussian mean Tensor. + :param log_scales: the Gaussian log stddev Tensor. + :return: a tensor like x of log probabilities (in nats). 
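+
+    Illustrative usage (hypothetical tensors `x`, `mu`, `log_var` of equal shape;
+    `log_var` is a log-variance, so the log stddev is half of it):
+    ```
+    ll = discretized_gaussian_log_likelihood(x, means=mu, log_scales=0.5 * log_var)
+    ```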
+ """ + assert x.shape == means.shape == log_scales.shape + centered_x = x - means + inv_stdv = th.exp(-log_scales) + plus_in = inv_stdv * (centered_x + 1.0 / 255.0) + cdf_plus = approx_standard_normal_cdf(plus_in) + min_in = inv_stdv * (centered_x - 1.0 / 255.0) + cdf_min = approx_standard_normal_cdf(min_in) + log_cdf_plus = th.log(cdf_plus.clamp(min=1e-12)) + log_one_minus_cdf_min = th.log((1.0 - cdf_min).clamp(min=1e-12)) + cdf_delta = cdf_plus - cdf_min + log_probs = th.where( + x < -0.999, + log_cdf_plus, + th.where(x > 0.999, log_one_minus_cdf_min, th.log(cdf_delta.clamp(min=1e-12))), + ) + assert log_probs.shape == x.shape + return log_probs diff --git a/opensora/schedulers/iddpm/gaussian_diffusion.py b/opensora/schedulers/iddpm/gaussian_diffusion.py new file mode 100644 index 0000000000000000000000000000000000000000..4a74592c061150b87787babcf1ad7603cd1289e4 --- /dev/null +++ b/opensora/schedulers/iddpm/gaussian_diffusion.py @@ -0,0 +1,835 @@ +# Adapted from DiT + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DiT: https://github.com/facebookresearch/DiT/tree/main +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# -------------------------------------------------------- + +import enum +import math + +import numpy as np +import torch as th + +from .diffusion_utils import discretized_gaussian_log_likelihood, normal_kl + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +class ModelMeanType(enum.Enum): + """ + Which type of output the model predicts. + """ + + PREVIOUS_X = enum.auto() # the model predicts x_{t-1} + START_X = enum.auto() # the model predicts x_0 + EPSILON = enum.auto() # the model predicts epsilon + + +class ModelVarType(enum.Enum): + """ + What is used as the model's output variance. + The LEARNED_RANGE option has been added to allow the model to predict + values between FIXED_SMALL and FIXED_LARGE, making its job easier. + """ + + LEARNED = enum.auto() + FIXED_SMALL = enum.auto() + FIXED_LARGE = enum.auto() + LEARNED_RANGE = enum.auto() + + +class LossType(enum.Enum): + MSE = enum.auto() # use raw MSE loss (and KL when learning variances) + RESCALED_MSE = enum.auto() # use raw MSE loss (with RESCALED_KL when learning variances) + KL = enum.auto() # use the variational lower-bound + RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB + + def is_vb(self): + return self == LossType.KL or self == LossType.RESCALED_KL + + +def _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac): + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + warmup_time = int(num_diffusion_timesteps * warmup_frac) + betas[:warmup_time] = np.linspace(beta_start, beta_end, warmup_time, dtype=np.float64) + return betas + + +def get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps): + """ + This is the deprecated API for creating beta schedules. + See get_named_beta_schedule() for the new library of schedules. 
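+
+    Illustrative call (these values reproduce the classic DDPM linear schedule):
+    ```
+    betas = get_beta_schedule("linear", beta_start=1e-4, beta_end=0.02,
+                              num_diffusion_timesteps=1000)
+    ```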
+ """ + if beta_schedule == "quad": + betas = ( + np.linspace( + beta_start**0.5, + beta_end**0.5, + num_diffusion_timesteps, + dtype=np.float64, + ) + ** 2 + ) + elif beta_schedule == "linear": + betas = np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "warmup10": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.1) + elif beta_schedule == "warmup50": + betas = _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, 0.5) + elif beta_schedule == "const": + betas = beta_end * np.ones(num_diffusion_timesteps, dtype=np.float64) + elif beta_schedule == "jsd": # 1/T, 1/(T-1), 1/(T-2), ..., 1 + betas = 1.0 / np.linspace(num_diffusion_timesteps, 1, num_diffusion_timesteps, dtype=np.float64) + else: + raise NotImplementedError(beta_schedule) + assert betas.shape == (num_diffusion_timesteps,) + return betas + + +def get_named_beta_schedule(schedule_name, num_diffusion_timesteps): + """ + Get a pre-defined beta schedule for the given name. + The beta schedule library consists of beta schedules which remain similar + in the limit of num_diffusion_timesteps. + Beta schedules may be added, but should not be removed or changed once + they are committed to maintain backwards compatibility. + """ + if schedule_name == "linear": + # Linear schedule from Ho et al, extended to work for any number of + # diffusion steps. + scale = 1000 / num_diffusion_timesteps + return get_beta_schedule( + "linear", + beta_start=scale * 0.0001, + beta_end=scale * 0.02, + num_diffusion_timesteps=num_diffusion_timesteps, + ) + elif schedule_name == "squaredcos_cap_v2": + return betas_for_alpha_bar( + num_diffusion_timesteps, + lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2, + ) + else: + raise NotImplementedError(f"unknown beta schedule: {schedule_name}") + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. + """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +class GaussianDiffusion: + """ + Utilities for training and sampling diffusion models. + Original ported from this codebase: + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/diffusion_utils_2.py#L42 + :param betas: a 1-D numpy array of betas for each diffusion timestep, + starting at T and going to 1. + """ + + def __init__(self, *, betas, model_mean_type, model_var_type, loss_type): + self.model_mean_type = model_mean_type + self.model_var_type = model_var_type + self.loss_type = loss_type + + # Use float64 for accuracy. 
+ betas = np.array(betas, dtype=np.float64) + self.betas = betas + assert len(betas.shape) == 1, "betas must be 1-D" + assert (betas > 0).all() and (betas <= 1).all() + + self.num_timesteps = int(betas.shape[0]) + + alphas = 1.0 - betas + self.alphas_cumprod = np.cumprod(alphas, axis=0) + self.alphas_cumprod_prev = np.append(1.0, self.alphas_cumprod[:-1]) + self.alphas_cumprod_next = np.append(self.alphas_cumprod[1:], 0.0) + assert self.alphas_cumprod_prev.shape == (self.num_timesteps,) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.sqrt_alphas_cumprod = np.sqrt(self.alphas_cumprod) + self.sqrt_one_minus_alphas_cumprod = np.sqrt(1.0 - self.alphas_cumprod) + self.log_one_minus_alphas_cumprod = np.log(1.0 - self.alphas_cumprod) + self.sqrt_recip_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod) + self.sqrt_recipm1_alphas_cumprod = np.sqrt(1.0 / self.alphas_cumprod - 1) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + self.posterior_variance = betas * (1.0 - self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.posterior_log_variance_clipped = ( + np.log(np.append(self.posterior_variance[1], self.posterior_variance[1:])) + if len(self.posterior_variance) > 1 + else np.array([]) + ) + + self.posterior_mean_coef1 = betas * np.sqrt(self.alphas_cumprod_prev) / (1.0 - self.alphas_cumprod) + self.posterior_mean_coef2 = (1.0 - self.alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - self.alphas_cumprod) + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + variance = _extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = _extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def q_sample(self, x_start, t, noise=None): + """ + Diffuse the data for a given number of diffusion steps. + In other words, sample from q(x_t | x_0). + :param x_start: the initial data batch. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :param noise: if specified, the split-out normal noise. + :return: A noisy version of x_start. 
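+
+        In closed form (the body below implements exactly this):
+        ```
+        x_t = sqrt(alphas_cumprod[t]) * x_start + sqrt(1 - alphas_cumprod[t]) * noise
+        ```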
+ """ + if noise is None: + noise = th.randn_like(x_start) + assert noise.shape == x_start.shape + return ( + _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise + ) + + def q_posterior_mean_variance(self, x_start, x_t, t): + """ + Compute the mean and variance of the diffusion posterior: + q(x_{t-1} | x_t, x_0) + """ + assert x_start.shape == x_t.shape + posterior_mean = ( + _extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + _extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = _extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = _extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + assert ( + posterior_mean.shape[0] + == posterior_variance.shape[0] + == posterior_log_variance_clipped.shape[0] + == x_start.shape[0] + ) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None): + """ + Apply the model to get p(x_{t-1} | x_t), as well as a prediction of + the initial x, x_0. + :param model: the model, which takes a signal and a batch of timesteps + as input. + :param x: the [N x C x ...] tensor at time t. + :param t: a 1-D Tensor of timesteps. + :param clip_denoised: if True, clip the denoised signal into [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. Applies before + clip_denoised. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict with the following keys: + - 'mean': the model mean output. + - 'variance': the model variance output. + - 'log_variance': the log of 'variance'. + - 'pred_xstart': the prediction for x_0. + """ + if model_kwargs is None: + model_kwargs = {} + + B, C = x.shape[:2] + assert t.shape == (B,) + model_output = model(x, t, **model_kwargs) + if isinstance(model_output, tuple): + model_output, extra = model_output + else: + extra = None + + if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]: + assert model_output.shape == (B, C * 2, *x.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + min_log = _extract_into_tensor(self.posterior_log_variance_clipped, t, x.shape) + max_log = _extract_into_tensor(np.log(self.betas), t, x.shape) + # The model_var_values is [-1, 1] for [min_var, max_var]. + frac = (model_var_values + 1) / 2 + model_log_variance = frac * max_log + (1 - frac) * min_log + model_variance = th.exp(model_log_variance) + else: + model_variance, model_log_variance = { + # for fixedlarge, we set the initial (log-)variance like so + # to get a better decoder log likelihood. 
+ ModelVarType.FIXED_LARGE: ( + np.append(self.posterior_variance[1], self.betas[1:]), + np.log(np.append(self.posterior_variance[1], self.betas[1:])), + ), + ModelVarType.FIXED_SMALL: ( + self.posterior_variance, + self.posterior_log_variance_clipped, + ), + }[self.model_var_type] + model_variance = _extract_into_tensor(model_variance, t, x.shape) + model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape) + + def process_xstart(x): + if denoised_fn is not None: + x = denoised_fn(x) + if clip_denoised: + return x.clamp(-1, 1) + return x + + if self.model_mean_type == ModelMeanType.START_X: + pred_xstart = process_xstart(model_output) + else: + pred_xstart = process_xstart(self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)) + model_mean, _, _ = self.q_posterior_mean_variance(x_start=pred_xstart, x_t=x, t=t) + + assert model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape + return { + "mean": model_mean, + "variance": model_variance, + "log_variance": model_log_variance, + "pred_xstart": pred_xstart, + "extra": extra, + } + + def _predict_xstart_from_eps(self, x_t, t, eps): + assert x_t.shape == eps.shape + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t + - _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * eps + ) + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute the mean for the previous step, given a function cond_fn that + computes the gradient of a conditional log probability with respect to + x. In particular, cond_fn computes grad(log(p(y|x))), and we want to + condition on y. + This uses the conditioning strategy from Sohl-Dickstein et al. (2015). + """ + gradient = cond_fn(x, t, **model_kwargs) + new_mean = p_mean_var["mean"].float() + p_mean_var["variance"] * gradient.float() + return new_mean + + def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None): + """ + Compute what the p_mean_variance output would have been, should the + model's score function be conditioned by cond_fn. + See condition_mean() for details on cond_fn. + Unlike condition_mean(), this instead uses the conditioning strategy + from Song et al (2020). + """ + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + + eps = self._predict_eps_from_xstart(x, t, p_mean_var["pred_xstart"]) + eps = eps - (1 - alpha_bar).sqrt() * cond_fn(x, t, **model_kwargs) + + out = p_mean_var.copy() + out["pred_xstart"] = self._predict_xstart_from_eps(x, t, eps) + out["mean"], _, _ = self.q_posterior_mean_variance(x_start=out["pred_xstart"], x_t=x, t=t) + return out + + def p_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + ): + """ + Sample x_{t-1} from the model at the given timestep. + :param model: the model to sample from. + :param x: the current tensor at x_{t-1}. + :param t: the value of t, starting at 0 for the first diffusion step. + :param clip_denoised: if True, clip the x_start prediction to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. 
+ :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :return: a dict containing the following keys: + - 'sample': a random sample from the model. + - 'pred_xstart': a prediction of x_0. + """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + noise = th.randn_like(x) + nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) # no noise when t == 0 + if cond_fn is not None: + out["mean"] = self.condition_mean(cond_fn, out, x, t, model_kwargs=model_kwargs) + sample = out["mean"] + nonzero_mask * th.exp(0.5 * out["log_variance"]) * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def p_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + ): + """ + Generate samples from the model. + :param model: the model module. + :param shape: the shape of the samples, (N, C, H, W). + :param noise: if specified, the noise from the encoder to sample. + Should be of the same shape as `shape`. + :param clip_denoised: if True, clip x_start predictions to [-1, 1]. + :param denoised_fn: if not None, a function which applies to the + x_start prediction before it is used to sample. + :param cond_fn: if not None, this is a gradient function that acts + similarly to the model. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param device: if specified, the device to create the samples on. + If not specified, use a model parameter's device. + :param progress: if True, show a tqdm progress bar. + :return: a non-differentiable batch of samples. + """ + final = None + for sample in self.p_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + ): + final = sample + return final["sample"] + + def p_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + ): + """ + Generate samples from the model and yield intermediate samples from + each timestep of diffusion. + Arguments are the same as p_sample_loop(). + Returns a generator over dicts, where each dict is the return value of + p_sample(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. + from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + with th.no_grad(): + out = self.p_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + ) + yield out + img = out["sample"] + + def ddim_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t-1} from the model using DDIM. + Same usage as p_sample(). 
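+
+        Illustrative usage (a sketch; `model`, `x`, and `t` as in p_sample):
+            out = diffusion.ddim_sample(model, x, t, eta=0.0)
+            x_prev = out["sample"]  # deterministic update when eta == 0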
+ """ + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = self._predict_eps_from_xstart(x, t, out["pred_xstart"]) + + alpha_bar = _extract_into_tensor(self.alphas_cumprod, t, x.shape) + alpha_bar_prev = _extract_into_tensor(self.alphas_cumprod_prev, t, x.shape) + sigma = eta * th.sqrt((1 - alpha_bar_prev) / (1 - alpha_bar)) * th.sqrt(1 - alpha_bar / alpha_bar_prev) + # Equation 12. + noise = th.randn_like(x) + mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_prev) + th.sqrt(1 - alpha_bar_prev - sigma**2) * eps + nonzero_mask = (t != 0).float().view(-1, *([1] * (len(x.shape) - 1))) # no noise when t == 0 + sample = mean_pred + nonzero_mask * sigma * noise + return {"sample": sample, "pred_xstart": out["pred_xstart"]} + + def ddim_reverse_sample( + self, + model, + x, + t, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + eta=0.0, + ): + """ + Sample x_{t+1} from the model using DDIM reverse ODE. + """ + assert eta == 0.0, "Reverse ODE only for deterministic path" + out = self.p_mean_variance( + model, + x, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + model_kwargs=model_kwargs, + ) + if cond_fn is not None: + out = self.condition_score(cond_fn, out, x, t, model_kwargs=model_kwargs) + # Usually our model outputs epsilon, but we re-derive it + # in case we used x_start or x_prev prediction. + eps = ( + _extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x.shape) * x - out["pred_xstart"] + ) / _extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x.shape) + alpha_bar_next = _extract_into_tensor(self.alphas_cumprod_next, t, x.shape) + + # Equation 12. reversed + mean_pred = out["pred_xstart"] * th.sqrt(alpha_bar_next) + th.sqrt(1 - alpha_bar_next) * eps + + return {"sample": mean_pred, "pred_xstart": out["pred_xstart"]} + + def ddim_sample_loop( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Generate samples from the model using DDIM. + Same usage as p_sample_loop(). + """ + final = None + for sample in self.ddim_sample_loop_progressive( + model, + shape, + noise=noise, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + device=device, + progress=progress, + eta=eta, + ): + final = sample + return final["sample"] + + def ddim_sample_loop_progressive( + self, + model, + shape, + noise=None, + clip_denoised=True, + denoised_fn=None, + cond_fn=None, + model_kwargs=None, + device=None, + progress=False, + eta=0.0, + ): + """ + Use DDIM to sample from the model and yield intermediate samples from + each timestep of DDIM. + Same usage as p_sample_loop_progressive(). + """ + if device is None: + device = next(model.parameters()).device + assert isinstance(shape, (tuple, list)) + if noise is not None: + img = noise + else: + img = th.randn(*shape, device=device) + indices = list(range(self.num_timesteps))[::-1] + + if progress: + # Lazy import so that we don't depend on tqdm. 
+ from tqdm.auto import tqdm + + indices = tqdm(indices) + + for i in indices: + t = th.tensor([i] * shape[0], device=device) + with th.no_grad(): + out = self.ddim_sample( + model, + img, + t, + clip_denoised=clip_denoised, + denoised_fn=denoised_fn, + cond_fn=cond_fn, + model_kwargs=model_kwargs, + eta=eta, + ) + yield out + img = out["sample"] + + def _vb_terms_bpd(self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None): + """ + Get a term for the variational lower-bound. + The resulting units are bits (rather than nats, as one might expect). + This allows for comparison to other papers. + :return: a dict with the following keys: + - 'output': a shape [N] tensor of NLLs or KLs. + - 'pred_xstart': the x_0 predictions. + """ + true_mean, _, true_log_variance_clipped = self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t) + out = self.p_mean_variance(model, x_t, t, clip_denoised=clip_denoised, model_kwargs=model_kwargs) + kl = normal_kl(true_mean, true_log_variance_clipped, out["mean"], out["log_variance"]) + kl = mean_flat(kl) / np.log(2.0) + + decoder_nll = -discretized_gaussian_log_likelihood( + x_start, means=out["mean"], log_scales=0.5 * out["log_variance"] + ) + assert decoder_nll.shape == x_start.shape + decoder_nll = mean_flat(decoder_nll) / np.log(2.0) + + # At the first timestep return the decoder NLL, + # otherwise return KL(q(x_{t-1}|x_t,x_0) || p(x_{t-1}|x_t)) + output = th.where((t == 0), decoder_nll, kl) + return {"output": output, "pred_xstart": out["pred_xstart"]} + + def training_losses(self, model, x_start, t, model_kwargs=None, noise=None): + """ + Compute training losses for a single timestep. + :param model: the model to evaluate loss on. + :param x_start: the [N x C x ...] tensor of inputs. + :param t: a batch of timestep indices. + :param model_kwargs: if not None, a dict of extra keyword arguments to + pass to the model. This can be used for conditioning. + :param noise: if specified, the specific Gaussian noise to try to remove. + :return: a dict with the key "loss" containing a tensor of shape [N]. + Some mean or variance settings may also have other keys. + """ + if model_kwargs is None: + model_kwargs = {} + if noise is None: + noise = th.randn_like(x_start) + x_t = self.q_sample(x_start, t, noise=noise) + + terms = {} + + if self.loss_type == LossType.KL or self.loss_type == LossType.RESCALED_KL: + terms["loss"] = self._vb_terms_bpd( + model=model, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + model_kwargs=model_kwargs, + )["output"] + if self.loss_type == LossType.RESCALED_KL: + terms["loss"] *= self.num_timesteps + elif self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE: + model_output = model(x_t, t, **model_kwargs) + + if self.model_var_type in [ + ModelVarType.LEARNED, + ModelVarType.LEARNED_RANGE, + ]: + B, C = x_t.shape[:2] + assert model_output.shape == (B, C * 2, *x_t.shape[2:]) + model_output, model_var_values = th.split(model_output, C, dim=1) + # Learn the variance using the variational bound, but don't let + # it affect our mean prediction. + frozen_out = th.cat([model_output.detach(), model_var_values], dim=1) + terms["vb"] = self._vb_terms_bpd( + model=lambda *args, r=frozen_out: r, + x_start=x_start, + x_t=x_t, + t=t, + clip_denoised=False, + )["output"] + if self.loss_type == LossType.RESCALED_MSE: + # Divide by 1000 for equivalence with initial implementation. + # Without a factor of 1/1000, the VB term hurts the MSE term. 
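+                    # (num_timesteps / 1000 == 1 for the default 1000-step
+                    # schedule; the rescaling only matters for respaced,
+                    # shorter schedules.)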
+                    terms["vb"] *= self.num_timesteps / 1000.0
+
+            target = {
+                ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(x_start=x_start, x_t=x_t, t=t)[0],
+                ModelMeanType.START_X: x_start,
+                ModelMeanType.EPSILON: noise,
+            }[self.model_mean_type]
+            assert model_output.shape == target.shape == x_start.shape
+            terms["mse"] = mean_flat((target - model_output) ** 2)
+            if "vb" in terms:
+                terms["loss"] = terms["mse"] + terms["vb"]
+            else:
+                terms["loss"] = terms["mse"]
+        else:
+            raise NotImplementedError(self.loss_type)
+
+        return terms
+
+    def _prior_bpd(self, x_start):
+        """
+        Get the prior KL term for the variational lower-bound, measured in
+        bits-per-dim.
+        This term cannot be optimized, as it depends only on the encoder.
+        :param x_start: the [N x C x ...] tensor of inputs.
+        :return: a batch of [N] KL values (in bits), one per batch element.
+        """
+        batch_size = x_start.shape[0]
+        t = th.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)
+        qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)
+        kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)
+        return mean_flat(kl_prior) / np.log(2.0)
+
+    def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):
+        """
+        Compute the entire variational lower-bound, measured in bits-per-dim,
+        as well as other related quantities.
+        :param model: the model to evaluate loss on.
+        :param x_start: the [N x C x ...] tensor of inputs.
+        :param clip_denoised: if True, clip denoised samples.
+        :param model_kwargs: if not None, a dict of extra keyword arguments to
+            pass to the model. This can be used for conditioning.
+        :return: a dict containing the following keys:
+                 - total_bpd: the total variational lower-bound, per batch element.
+                 - prior_bpd: the prior term in the lower-bound.
+                 - vb: an [N x T] tensor of terms in the lower-bound.
+                 - xstart_mse: an [N x T] tensor of x_0 MSEs for each timestep.
+                 - mse: an [N x T] tensor of epsilon MSEs for each timestep.
+        """
+        device = x_start.device
+        batch_size = x_start.shape[0]
+
+        vb = []
+        xstart_mse = []
+        mse = []
+        for t in list(range(self.num_timesteps))[::-1]:
+            t_batch = th.tensor([t] * batch_size, device=device)
+            noise = th.randn_like(x_start)
+            x_t = self.q_sample(x_start=x_start, t=t_batch, noise=noise)
+            # Calculate VLB term at the current timestep
+            with th.no_grad():
+                out = self._vb_terms_bpd(
+                    model,
+                    x_start=x_start,
+                    x_t=x_t,
+                    t=t_batch,
+                    clip_denoised=clip_denoised,
+                    model_kwargs=model_kwargs,
+                )
+            vb.append(out["output"])
+            xstart_mse.append(mean_flat((out["pred_xstart"] - x_start) ** 2))
+            eps = self._predict_eps_from_xstart(x_t, t_batch, out["pred_xstart"])
+            mse.append(mean_flat((eps - noise) ** 2))
+
+        vb = th.stack(vb, dim=1)
+        xstart_mse = th.stack(xstart_mse, dim=1)
+        mse = th.stack(mse, dim=1)
+
+        prior_bpd = self._prior_bpd(x_start)
+        total_bpd = vb.sum(dim=1) + prior_bpd
+        return {
+            "total_bpd": total_bpd,
+            "prior_bpd": prior_bpd,
+            "vb": vb,
+            "xstart_mse": xstart_mse,
+            "mse": mse,
+        }
+
+
+def _extract_into_tensor(arr, timesteps, broadcast_shape):
+    """
+    Extract values from a 1-D numpy array for a batch of indices.
+    :param arr: the 1-D numpy array.
+    :param timesteps: a tensor of indices into the array to extract.
+    :param broadcast_shape: a larger shape of K dimensions with the batch
+                            dimension equal to the length of timesteps.
+    :return: a tensor of shape [batch_size, 1, ...] where the shape has K dims.
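+
+    Illustrative usage (a sketch): broadcasting a per-timestep coefficient
+    over a batch:
+        coef = _extract_into_tensor(diffusion.sqrt_alphas_cumprod, t, x.shape)
+        scaled = coef * x  # coef has shape (N, 1, 1, 1) for a 4-D x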
+    """
+    res = th.from_numpy(arr).to(device=timesteps.device)[timesteps].float()
+    while len(res.shape) < len(broadcast_shape):
+        res = res[..., None]
+    return res + th.zeros(broadcast_shape, device=timesteps.device)
diff --git a/opensora/schedulers/iddpm/respace.py b/opensora/schedulers/iddpm/respace.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5ea16cedc356d0b93ed1f16cc802958b2af50ac
--- /dev/null
+++ b/opensora/schedulers/iddpm/respace.py
@@ -0,0 +1,127 @@
+# Adapted from DiT
+
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# --------------------------------------------------------
+# References:
+# DiT: https://github.com/facebookresearch/DiT/tree/main
+# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py
+# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion
+# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
+# --------------------------------------------------------
+
+
+import numpy as np
+import torch as th
+
+from .gaussian_diffusion import GaussianDiffusion
+
+
+def space_timesteps(num_timesteps, section_counts):
+    """
+    Create a list of timesteps to use from an original diffusion process,
+    given the number of timesteps we want to take from equally-sized portions
+    of the original process.
+    For example, if there are 300 timesteps and the section counts are [10,15,20]
+    then the first 100 timesteps are strided to be 10 timesteps, the second 100
+    are strided to be 15 timesteps, and the final 100 are strided to be 20.
+    If the stride is a string starting with "ddim", then the fixed striding
+    from the DDIM paper is used, and only one section is allowed.
+    :param num_timesteps: the number of diffusion steps in the original
+                          process to divide up.
+    :param section_counts: either a list of numbers, or a string containing
+                           comma-separated numbers, indicating the step count
+                           per section. As a special case, use "ddimN" where N
+                           is a number of steps to use the striding from the
+                           DDIM paper.
+    :return: a set of diffusion steps from the original process to use.
+    """
+    if isinstance(section_counts, str):
+        if section_counts.startswith("ddim"):
+            desired_count = int(section_counts[len("ddim") :])
+            for i in range(1, num_timesteps):
+                if len(range(0, num_timesteps, i)) == desired_count:
+                    return set(range(0, num_timesteps, i))
+            raise ValueError(f"cannot create exactly {desired_count} steps with an integer stride")
+        section_counts = [int(x) for x in section_counts.split(",")]
+    size_per = num_timesteps // len(section_counts)
+    extra = num_timesteps % len(section_counts)
+    start_idx = 0
+    all_steps = []
+    for i, section_count in enumerate(section_counts):
+        size = size_per + (1 if i < extra else 0)
+        if size < section_count:
+            raise ValueError(f"cannot divide section of {size} steps into {section_count}")
+        if section_count <= 1:
+            frac_stride = 1
+        else:
+            frac_stride = (size - 1) / (section_count - 1)
+        cur_idx = 0.0
+        taken_steps = []
+        for _ in range(section_count):
+            taken_steps.append(start_idx + round(cur_idx))
+            cur_idx += frac_stride
+        all_steps += taken_steps
+        start_idx += size
+    return set(all_steps)
+
+
+class SpacedDiffusion(GaussianDiffusion):
+    """
+    A diffusion process which can skip steps in a base diffusion process.
+    :param use_timesteps: a collection (sequence or set) of timesteps from the
+        original diffusion process to retain.
+ :param kwargs: the kwargs to create the base diffusion process. + """ + + def __init__(self, use_timesteps, **kwargs): + self.use_timesteps = set(use_timesteps) + self.timestep_map = [] + self.original_num_steps = len(kwargs["betas"]) + + base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa + last_alpha_cumprod = 1.0 + new_betas = [] + for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): + if i in self.use_timesteps: + new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) + last_alpha_cumprod = alpha_cumprod + self.timestep_map.append(i) + kwargs["betas"] = np.array(new_betas) + super().__init__(**kwargs) + + def p_mean_variance(self, model, *args, **kwargs): # pylint: disable=signature-differs + return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) + + def training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs + return super().training_losses(self._wrap_model(model), *args, **kwargs) + + def condition_mean(self, cond_fn, *args, **kwargs): + return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) + + def condition_score(self, cond_fn, *args, **kwargs): + return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) + + def _wrap_model(self, model): + if isinstance(model, _WrappedModel): + return model + return _WrappedModel(model, self.timestep_map, self.original_num_steps) + + def _scale_timesteps(self, t): + # Scaling is done by the wrapped model. + return t + + +class _WrappedModel: + def __init__(self, model, timestep_map, original_num_steps): + self.model = model + self.timestep_map = timestep_map + # self.rescale_timesteps = rescale_timesteps + self.original_num_steps = original_num_steps + + def __call__(self, x, ts, **kwargs): + map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) + new_ts = map_tensor[ts] + # if self.rescale_timesteps: + # new_ts = new_ts.float() * (1000.0 / self.original_num_steps) + return self.model(x, new_ts, **kwargs) diff --git a/opensora/schedulers/iddpm/timestep_sampler.py b/opensora/schedulers/iddpm/timestep_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..52b6717d528f398cd08f34c347b7fb69f4d5a9a3 --- /dev/null +++ b/opensora/schedulers/iddpm/timestep_sampler.py @@ -0,0 +1,150 @@ +# Adapted from DiT + +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# -------------------------------------------------------- +# References: +# DiT: https://github.com/facebookresearch/DiT/tree/main +# GLIDE: https://github.com/openai/glide-text2im/blob/main/glide_text2im/gaussian_diffusion.py +# ADM: https://github.com/openai/guided-diffusion/blob/main/guided_diffusion +# IDDPM: https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# -------------------------------------------------------- + +from abc import ABC, abstractmethod + +import numpy as np +import torch as th +import torch.distributed as dist + + +def create_named_schedule_sampler(name, diffusion): + """ + Create a ScheduleSampler from a library of pre-defined samplers. + :param name: the name of the sampler. + :param diffusion: the diffusion object to sample for. 
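+
+    Illustrative usage (a sketch):
+        sampler = create_named_schedule_sampler("uniform", diffusion)
+        t, weights = sampler.sample(batch_size, device)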
+ """ + if name == "uniform": + return UniformSampler(diffusion) + elif name == "loss-second-moment": + return LossSecondMomentResampler(diffusion) + else: + raise NotImplementedError(f"unknown schedule sampler: {name}") + + +class ScheduleSampler(ABC): + """ + A distribution over timesteps in the diffusion process, intended to reduce + variance of the objective. + By default, samplers perform unbiased importance sampling, in which the + objective's mean is unchanged. + However, subclasses may override sample() to change how the resampled + terms are reweighted, allowing for actual changes in the objective. + """ + + @abstractmethod + def weights(self): + """ + Get a numpy array of weights, one per diffusion step. + The weights needn't be normalized, but must be positive. + """ + + def sample(self, batch_size, device): + """ + Importance-sample timesteps for a batch. + :param batch_size: the number of timesteps. + :param device: the torch device to save to. + :return: a tuple (timesteps, weights): + - timesteps: a tensor of timestep indices. + - weights: a tensor of weights to scale the resulting losses. + """ + w = self.weights() + p = w / np.sum(w) + indices_np = np.random.choice(len(p), size=(batch_size,), p=p) + indices = th.from_numpy(indices_np).long().to(device) + weights_np = 1 / (len(p) * p[indices_np]) + weights = th.from_numpy(weights_np).float().to(device) + return indices, weights + + +class UniformSampler(ScheduleSampler): + def __init__(self, diffusion): + self.diffusion = diffusion + self._weights = np.ones([diffusion.num_timesteps]) + + def weights(self): + return self._weights + + +class LossAwareSampler(ScheduleSampler): + def update_with_local_losses(self, local_ts, local_losses): + """ + Update the reweighting using losses from a model. + Call this method from each rank with a batch of timesteps and the + corresponding losses for each of those timesteps. + This method will perform synchronization to make sure all of the ranks + maintain the exact same reweighting. + :param local_ts: an integer Tensor of timesteps. + :param local_losses: a 1D Tensor of losses. + """ + batch_sizes = [th.tensor([0], dtype=th.int32, device=local_ts.device) for _ in range(dist.get_world_size())] + dist.all_gather( + batch_sizes, + th.tensor([len(local_ts)], dtype=th.int32, device=local_ts.device), + ) + + # Pad all_gather batches to be the maximum batch size. + batch_sizes = [x.item() for x in batch_sizes] + max_bs = max(batch_sizes) + + timestep_batches = [th.zeros(max_bs).to(local_ts) for bs in batch_sizes] + loss_batches = [th.zeros(max_bs).to(local_losses) for bs in batch_sizes] + dist.all_gather(timestep_batches, local_ts) + dist.all_gather(loss_batches, local_losses) + timesteps = [x.item() for y, bs in zip(timestep_batches, batch_sizes) for x in y[:bs]] + losses = [x.item() for y, bs in zip(loss_batches, batch_sizes) for x in y[:bs]] + self.update_with_all_losses(timesteps, losses) + + @abstractmethod + def update_with_all_losses(self, ts, losses): + """ + Update the reweighting using losses from a model. + Sub-classes should override this method to update the reweighting + using losses from the model. + This method directly updates the reweighting without synchronizing + between workers. It is called by update_with_local_losses from all + ranks with identical arguments. Thus, it should have deterministic + behavior to maintain state across workers. + :param ts: a list of int timesteps. + :param losses: a list of float losses, one per timestep. 
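+
+        See LossSecondMomentResampler below for a concrete subclass that
+        keeps a fixed-size per-timestep loss history.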
+        """
+
+
+class LossSecondMomentResampler(LossAwareSampler):
+    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
+        self.diffusion = diffusion
+        self.history_per_term = history_per_term
+        self.uniform_prob = uniform_prob
+        self._loss_history = np.zeros([diffusion.num_timesteps, history_per_term], dtype=np.float64)
+        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)  # the bare np.int alias was removed in NumPy 1.24
+
+    def weights(self):
+        if not self._warmed_up():
+            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
+        weights = np.sqrt(np.mean(self._loss_history**2, axis=-1))
+        weights /= np.sum(weights)
+        weights *= 1 - self.uniform_prob
+        weights += self.uniform_prob / len(weights)
+        return weights
+
+    def update_with_all_losses(self, ts, losses):
+        for t, loss in zip(ts, losses):
+            if self._loss_counts[t] == self.history_per_term:
+                # Shift out the oldest loss term.
+                self._loss_history[t, :-1] = self._loss_history[t, 1:]
+                self._loss_history[t, -1] = loss
+            else:
+                self._loss_history[t, self._loss_counts[t]] = loss
+                self._loss_counts[t] += 1
+
+    def _warmed_up(self):
+        return (self._loss_counts == self.history_per_term).all()
diff --git a/opensora/utils/__init__.py b/opensora/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/opensora/utils/ckpt_utils.py b/opensora/utils/ckpt_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..27adfba19cffaf2a0aa587c71188f3c5702ce3cc
--- /dev/null
+++ b/opensora/utils/ckpt_utils.py
@@ -0,0 +1,216 @@
+import functools
+import json
+import logging
+import operator
+import os
+from typing import Tuple
+
+import colossalai
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+from colossalai.booster import Booster
+from colossalai.checkpoint_io import GeneralCheckpointIO
+from colossalai.cluster import DistCoordinator
+from torch.optim import Optimizer
+from torch.optim.lr_scheduler import _LRScheduler
+from torchvision.datasets.utils import download_url
+
+pretrained_models = {
+    "DiT-XL-2-512x512.pt": "https://dl.fbaipublicfiles.com/DiT/models/DiT-XL-2-512x512.pt",
+    "DiT-XL-2-256x256.pt": "https://dl.fbaipublicfiles.com/DiT/models/DiT-XL-2-256x256.pt",
+    "Latte-XL-2-256x256-ucf101.pt": "https://huggingface.co/maxin-cn/Latte/resolve/main/ucf101.pt",
+    "PixArt-XL-2-256x256.pth": "https://huggingface.co/PixArt-alpha/PixArt-alpha/resolve/main/PixArt-XL-2-256x256.pth",
+    "PixArt-XL-2-SAM-256x256.pth": "https://huggingface.co/PixArt-alpha/PixArt-alpha/resolve/main/PixArt-XL-2-SAM-256x256.pth",
+    "PixArt-XL-2-512x512.pth": "https://huggingface.co/PixArt-alpha/PixArt-alpha/resolve/main/PixArt-XL-2-512x512.pth",
+    "PixArt-XL-2-1024-MS.pth": "https://huggingface.co/PixArt-alpha/PixArt-alpha/resolve/main/PixArt-XL-2-1024-MS.pth",
+}
+
+
+def reparameter(ckpt, name=None):
+    if "DiT" in name:
+        ckpt["x_embedder.proj.weight"] = ckpt["x_embedder.proj.weight"].unsqueeze(2)
+        del ckpt["pos_embed"]
+    elif "Latte" in name:
+        ckpt = ckpt["ema"]
+        ckpt["x_embedder.proj.weight"] = ckpt["x_embedder.proj.weight"].unsqueeze(2)
+        del ckpt["pos_embed"]
+        del ckpt["temp_embed"]
+    elif "PixArt" in name:
+        ckpt = ckpt["state_dict"]
+        ckpt["x_embedder.proj.weight"] = ckpt["x_embedder.proj.weight"].unsqueeze(2)
+        del ckpt["pos_embed"]
+    return ckpt
+
+
+def find_model(model_name):
+    """
+    Finds a pre-trained DiT model, downloading it if necessary. Alternatively, loads a model from a local path.
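+
+    Illustrative usage (a sketch; the local path is hypothetical):
+        state_dict = find_model("DiT-XL-2-256x256.pt")  # downloaded if absent
+        state_dict = find_model("outputs/my_run/model_ckpt.pt")  # local file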
+    """
+    if model_name in pretrained_models:  # Find/download our pre-trained DiT checkpoints
+        model = download_model(model_name)
+        model = reparameter(model, model_name)
+        return model
+    else:  # Load a custom DiT checkpoint:
+        assert os.path.isfile(model_name), f"Could not find DiT checkpoint at {model_name}"
+        checkpoint = torch.load(model_name, map_location=lambda storage, loc: storage)
+        if "pos_embed_temporal" in checkpoint:
+            del checkpoint["pos_embed_temporal"]
+        if "pos_embed" in checkpoint:
+            del checkpoint["pos_embed"]
+        if "ema" in checkpoint:  # supports checkpoints from train.py
+            checkpoint = checkpoint["ema"]
+        return checkpoint
+
+
+def download_model(model_name):
+    """
+    Downloads a pre-trained DiT model from the web.
+    """
+    assert model_name in pretrained_models
+    local_path = f"pretrained_models/{model_name}"
+    if not os.path.isfile(local_path):
+        os.makedirs("pretrained_models", exist_ok=True)
+        web_path = pretrained_models[model_name]
+        download_url(web_path, "pretrained_models", model_name)
+    model = torch.load(local_path, map_location=lambda storage, loc: storage)
+    return model
+
+
+def load_from_sharded_state_dict(model, ckpt_path):
+    ckpt_io = GeneralCheckpointIO()
+    ckpt_io.load_model(model, os.path.join(ckpt_path, "model"))
+
+
+def model_sharding(model: torch.nn.Module):
+    # Evenly shard every parameter across all ranks, padding each flattened
+    # parameter so its length divides by the world size.
+    global_rank = dist.get_rank()
+    world_size = dist.get_world_size()
+    for _, param in model.named_parameters():
+        padding_size = (world_size - param.numel() % world_size) % world_size
+        if padding_size > 0:
+            padding_param = torch.nn.functional.pad(param.data.view(-1), [0, padding_size])
+        else:
+            padding_param = param.data.view(-1)
+        split_params = padding_param.split(padding_param.numel() // world_size)
+        split_params = split_params[global_rank]
+        param.data = split_params
+
+
+def load_json(file_path: str):
+    with open(file_path, "r") as f:
+        return json.load(f)
+
+
+def save_json(data, file_path: str):
+    with open(file_path, "w") as f:
+        json.dump(data, f, indent=4)
+
+
+def remove_padding(tensor: torch.Tensor, original_shape: Tuple) -> torch.Tensor:
+    return tensor[: functools.reduce(operator.mul, original_shape)]
+
+
+def model_gathering(model: torch.nn.Module, model_shape_dict: dict):
+    global_rank = dist.get_rank()
+    global_size = dist.get_world_size()
+    for name, param in model.named_parameters():
+        all_params = [torch.empty_like(param.data) for _ in range(global_size)]
+        dist.all_gather(all_params, param.data, group=dist.group.WORLD)
+        if int(global_rank) == 0:
+            all_params = torch.cat(all_params)
+            param.data = remove_padding(all_params, model_shape_dict[name]).view(model_shape_dict[name])
+    dist.barrier()
+
+
+def record_model_param_shape(model: torch.nn.Module) -> dict:
+    param_shape = {}
+    for name, param in model.named_parameters():
+        param_shape[name] = param.shape
+    return param_shape
+
+
+def save(
+    booster: Booster,
+    model: nn.Module,
+    ema: nn.Module,
+    optimizer: Optimizer,
+    lr_scheduler: _LRScheduler,
+    epoch: int,
+    step: int,
+    global_step: int,
+    batch_size: int,
+    coordinator: DistCoordinator,
+    save_dir: str,
+    shape_dict: dict,
+):
+    save_dir = os.path.join(save_dir, f"epoch{epoch}-global_step{global_step}")
+    os.makedirs(os.path.join(save_dir, "model"), exist_ok=True)
+
+    booster.save_model(model, os.path.join(save_dir, "model"), shard=True)
+    # ema is not boosted, so we don't need to use booster.save_model
+    model_gathering(ema, shape_dict)
+    global_rank = dist.get_rank()
+    if int(global_rank) == 0:
+        torch.save(ema.state_dict(), os.path.join(save_dir,
"ema.pt")) + model_sharding(ema) + + booster.save_optimizer(optimizer, os.path.join(save_dir, "optimizer"), shard=True, size_per_shard=4096) + if lr_scheduler is not None: + booster.save_lr_scheduler(lr_scheduler, os.path.join(save_dir, "lr_scheduler")) + running_states = { + "epoch": epoch, + "step": step, + "global_step": global_step, + "sample_start_index": step * batch_size, + } + if coordinator.is_master(): + save_json(running_states, os.path.join(save_dir, "running_states.json")) + dist.barrier() + + +def load( + booster: Booster, model: nn.Module, ema: nn.Module, optimizer: Optimizer, lr_scheduler: _LRScheduler, load_dir: str +) -> Tuple[int, int, int]: + booster.load_model(model, os.path.join(load_dir, "model")) + # ema is not boosted, so we don't use booster.load_model + # ema.load_state_dict(torch.load(os.path.join(load_dir, "ema.pt"))) + ema.load_state_dict(torch.load(os.path.join(load_dir, "ema.pt"), map_location=torch.device("cpu"))) + booster.load_optimizer(optimizer, os.path.join(load_dir, "optimizer")) + if lr_scheduler is not None: + booster.load_lr_scheduler(lr_scheduler, os.path.join(load_dir, "lr_scheduler")) + running_states = load_json(os.path.join(load_dir, "running_states.json")) + dist.barrier() + return running_states["epoch"], running_states["step"], running_states["sample_start_index"] + + +def create_logger(logging_dir): + """ + Create a logger that writes to a log file and stdout. + """ + if dist.get_rank() == 0: # real logger + logging.basicConfig( + level=logging.INFO, + format="[\033[34m%(asctime)s\033[0m] %(message)s", + datefmt="%Y-%m-%d %H:%M:%S", + handlers=[logging.StreamHandler(), logging.FileHandler(f"{logging_dir}/log.txt")], + ) + logger = logging.getLogger(__name__) + else: # dummy logger (does nothing) + logger = logging.getLogger(__name__) + logger.addHandler(logging.NullHandler()) + return logger + + +def load_checkpoint(model, ckpt_path, save_as_pt=True): + if ckpt_path.endswith(".pt") or ckpt_path.endswith(".pth"): + state_dict = find_model(ckpt_path) + missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False) + print(f"Missing keys: {missing_keys}") + print(f"Unexpected keys: {unexpected_keys}") + elif os.path.isdir(ckpt_path): + load_from_sharded_state_dict(model, ckpt_path) + if save_as_pt: + save_path = os.path.join(ckpt_path, "model_ckpt.pt") + torch.save(model.state_dict(), save_path) + print(f"Model checkpoint saved to {save_path}") + else: + raise ValueError(f"Invalid checkpoint path: {ckpt_path}") diff --git a/opensora/utils/config_utils.py b/opensora/utils/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5ef815064b98b4a1cf35d2809fb3b0ced7af0c41 --- /dev/null +++ b/opensora/utils/config_utils.py @@ -0,0 +1,97 @@ +import argparse +import json +import os +from glob import glob + +from mmengine.config import Config +from torch.utils.tensorboard import SummaryWriter + + +def parse_args(training=False): + parser = argparse.ArgumentParser() + + # model config + parser.add_argument("config", help="model config file path") + + parser.add_argument("--seed", default=42, type=int, help="generation seed") + parser.add_argument("--ckpt-path", type=str, help="path to model ckpt; will overwrite cfg.ckpt_path if specified") + parser.add_argument("--batch-size", default=None, type=int, help="batch size") + + # ====================================================== + # Inference + # ====================================================== + + if not training: + # prompt + 
parser.add_argument("--prompt-path", default=None, type=str, help="path to prompt txt file")
+        parser.add_argument("--save-dir", default=None, type=str, help="path to save generated samples")
+
+        # hyperparameters
+        parser.add_argument("--num-sampling-steps", default=None, type=int, help="sampling steps")
+        parser.add_argument("--cfg-scale", default=None, type=float, help="balance between cond & uncond")
+    else:
+        # a plain type=bool would parse any non-empty string (even "False") as True
+        parser.add_argument(
+            "--wandb", default=None, type=lambda s: s.lower() in ("true", "1"), help="enable wandb"
+        )
+        parser.add_argument("--load", default=None, type=str, help="path to continue training")
+        parser.add_argument("--data-path", default=None, type=str, help="path to data csv")
+
+    return parser.parse_args()
+
+
+def merge_args(cfg, args, training=False):
+    if args.ckpt_path is not None:
+        cfg.model["from_pretrained"] = args.ckpt_path
+        args.ckpt_path = None
+
+    if not training:
+        if args.cfg_scale is not None:
+            cfg.scheduler["cfg_scale"] = args.cfg_scale
+            args.cfg_scale = None
+
+    if "multi_resolution" not in cfg:
+        cfg["multi_resolution"] = False
+    for k, v in vars(args).items():
+        if k in cfg and v is not None:
+            cfg[k] = v
+
+    return cfg
+
+
+def parse_configs(training=False):
+    args = parse_args(training)
+    cfg = Config.fromfile(args.config)
+    cfg = merge_args(cfg, args, training)
+    return cfg
+
+
+def create_experiment_workspace(cfg):
+    """
+    This function creates a folder for experiment tracking.
+
+    Args:
+        cfg: the loaded experiment config.
+
+    Returns:
+        exp_name: the name of the experiment folder.
+        exp_dir: the path to the experiment folder.
+    """
+    # Make outputs folder (holds all experiment subfolders)
+    os.makedirs(cfg.outputs, exist_ok=True)
+    experiment_index = len(glob(f"{cfg.outputs}/*"))
+
+    # Create an experiment folder
+    model_name = cfg.model["type"].replace("/", "-")
+    exp_name = f"{experiment_index:03d}-F{cfg.num_frames}S{cfg.frame_interval}-{model_name}"
+    exp_dir = f"{cfg.outputs}/{exp_name}"
+    os.makedirs(exp_dir, exist_ok=True)
+    return exp_name, exp_dir
+
+
+def save_training_config(cfg, experiment_dir):
+    with open(f"{experiment_dir}/config.txt", "w") as f:
+        json.dump(cfg, f, indent=4)
+
+
+def create_tensorboard_writer(exp_dir):
+    tensorboard_dir = f"{exp_dir}/tensorboard"
+    os.makedirs(tensorboard_dir, exist_ok=True)
+    writer = SummaryWriter(tensorboard_dir)
+    return writer
diff --git a/opensora/utils/misc.py b/opensora/utils/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..d162526162ea0847400a9dc9d1ab39e45d8e5abf
--- /dev/null
+++ b/opensora/utils/misc.py
@@ -0,0 +1,286 @@
+import collections
+import importlib
+import logging
+import os
+import time
+from collections import OrderedDict
+from collections.abc import Sequence
+from itertools import repeat
+from typing import Tuple
+
+import numpy as np
+import torch
+import torch.distributed as dist
+
+
+def print_rank(var_name, var_value, rank=0):
+    if dist.get_rank() == rank:
+        print(f"[Rank {rank}] {var_name}: {var_value}")
+
+
+def print_0(*args, **kwargs):
+    if dist.get_rank() == 0:
+        print(*args, **kwargs)
+
+
+def requires_grad(model: torch.nn.Module, flag: bool = True) -> None:
+    """
+    Set requires_grad flag for all parameters in a model.
+    """
+    for p in model.parameters():
+        p.requires_grad = flag
+
+
+def format_numel_str(numel: int) -> str:
+    B = 1024**3
+    M = 1024**2
+    K = 1024
+    if numel >= B:
+        return f"{numel / B:.2f} B"
+    elif numel >= M:
+        return f"{numel / M:.2f} M"
+    elif numel >= K:
+        return f"{numel / K:.2f} K"
+    else:
+        return f"{numel}"
+
+
+def all_reduce_mean(tensor: torch.Tensor) -> torch.Tensor:
+    dist.all_reduce(tensor=tensor, op=dist.ReduceOp.SUM)
+    tensor.div_(dist.get_world_size())
+    return tensor
+
+
+def get_model_numel(model: torch.nn.Module) -> Tuple[int, int]:
+    num_params = 0
+    num_params_trainable = 0
+    for p in model.parameters():
+        num_params += p.numel()
+        if p.requires_grad:
+            num_params_trainable += p.numel()
+    return num_params, num_params_trainable
+
+
+def try_import(name):
+    """Try to import a module.
+
+    Args:
+        name (str): Specifies what module to import in absolute or relative
+            terms (e.g. either pkg.mod or ..mod).
+    Returns:
+        ModuleType or None: If importing successfully, returns the imported
+        module, otherwise returns None.
+    """
+    try:
+        return importlib.import_module(name)
+    except ImportError:
+        return None
+
+
+def transpose(x):
+    """
+    Transpose a list of lists.
+
+    Args:
+        x (list[list]): the nested list to transpose.
+    """
+    ret = list(map(list, zip(*x)))
+    return ret
+
+
+def get_timestamp():
+    timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime(time.time()))
+    return timestamp
+
+
+def format_time(seconds):
+    days = int(seconds / 3600 / 24)
+    seconds = seconds - days * 3600 * 24
+    hours = int(seconds / 3600)
+    seconds = seconds - hours * 3600
+    minutes = int(seconds / 60)
+    seconds = seconds - minutes * 60
+    secondsf = int(seconds)
+    seconds = seconds - secondsf
+    millis = int(seconds * 1000)
+
+    f = ""
+    i = 1
+    if days > 0:
+        f += str(days) + "D"
+        i += 1
+    if hours > 0 and i <= 2:
+        f += str(hours) + "h"
+        i += 1
+    if minutes > 0 and i <= 2:
+        f += str(minutes) + "m"
+        i += 1
+    if secondsf > 0 and i <= 2:
+        f += str(secondsf) + "s"
+        i += 1
+    if millis > 0 and i <= 2:
+        f += str(millis) + "ms"
+        i += 1
+    if f == "":
+        f = "0ms"
+    return f
+
+
+def to_tensor(data):
+    """Convert objects of various python types to :obj:`torch.Tensor`.
+
+    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
+    :class:`Sequence`, :class:`int` and :class:`float`.
+
+    Args:
+        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to
+            be converted.
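+
+    Returns:
+        torch.Tensor: the converted tensor.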
+    """
+
+    if isinstance(data, torch.Tensor):
+        return data
+    elif isinstance(data, np.ndarray):
+        return torch.from_numpy(data)
+    elif isinstance(data, Sequence) and not isinstance(data, str):
+        return torch.tensor(data)
+    elif isinstance(data, int):
+        return torch.LongTensor([data])
+    elif isinstance(data, float):
+        return torch.FloatTensor([data])
+    else:
+        raise TypeError(f"type {type(data)} cannot be converted to tensor.")
+
+
+def to_ndarray(data):
+    if isinstance(data, torch.Tensor):
+        return data.numpy()
+    elif isinstance(data, np.ndarray):
+        return data
+    elif isinstance(data, Sequence):
+        return np.array(data)
+    elif isinstance(data, int):
+        # np.array wraps the value; np.ndarray([data]) would instead allocate
+        # an uninitialized array of shape (data,)
+        return np.array([data], dtype=int)
+    elif isinstance(data, float):
+        return np.array([data], dtype=float)
+    else:
+        raise TypeError(f"type {type(data)} cannot be converted to ndarray.")
+
+
+def to_torch_dtype(dtype):
+    if isinstance(dtype, torch.dtype):
+        return dtype
+    elif isinstance(dtype, str):
+        dtype_mapping = {
+            "float64": torch.float64,
+            "float32": torch.float32,
+            "float16": torch.float16,
+            "fp32": torch.float32,
+            "fp16": torch.float16,
+            "half": torch.float16,
+            "bf16": torch.bfloat16,
+        }
+        if dtype not in dtype_mapping:
+            raise ValueError(f"unsupported dtype string: {dtype}")
+        dtype = dtype_mapping[dtype]
+        return dtype
+    else:
+        raise ValueError(f"cannot convert {type(dtype)} to a torch dtype")
+
+
+def count_params(model):
+    return sum(p.numel() for p in model.parameters() if p.requires_grad)
+
+
+def _ntuple(n):
+    def parse(x):
+        if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
+            return x
+        return tuple(repeat(x, n))
+
+    return parse
+
+
+to_1tuple = _ntuple(1)
+to_2tuple = _ntuple(2)
+to_3tuple = _ntuple(3)
+to_4tuple = _ntuple(4)
+to_ntuple = _ntuple
+
+
+def convert_SyncBN_to_BN2d(model_cfg):
+    for k in model_cfg:
+        v = model_cfg[k]
+        if k == "norm_cfg" and v["type"] == "SyncBN":
+            v["type"] = "BN2d"
+        elif isinstance(v, dict):
+            convert_SyncBN_to_BN2d(v)
+
+
+def get_topk(x, dim=4, k=5):
+    x = to_tensor(x)
+    inds = x[..., dim].topk(k)[1]
+    return x[inds]
+
+
+def param_sigmoid(x, alpha):
+    ret = 1 / (1 + (-alpha * x).exp())
+    return ret
+
+
+def inverse_param_sigmoid(x, alpha, eps=1e-5):
+    x = x.clamp(min=0, max=1)
+    x1 = x.clamp(min=eps)
+    x2 = (1 - x).clamp(min=eps)
+    return torch.log(x1 / x2) / alpha
+
+
+def inverse_sigmoid(x, eps=1e-5):
+    """Inverse function of sigmoid.
+
+    Args:
+        x (Tensor): the tensor to invert.
+        eps (float): eps used to avoid numerical overflow. Defaults to 1e-5.
+    Returns:
+        Tensor: the inverse sigmoid of x, with the same shape as the input.
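+
+    Example (illustrative):
+        y = torch.sigmoid(x)
+        x_rec = inverse_sigmoid(y)  # ~= x wherever sigmoid(x) lies in [eps, 1 - eps]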
+    """
+    x = x.clamp(min=0, max=1)
+    x1 = x.clamp(min=eps)
+    x2 = (1 - x).clamp(min=eps)
+    return torch.log(x1 / x2)
+
+
+def count_columns(df, columns):
+    cnt_dict = OrderedDict()
+    num_samples = len(df)
+
+    for col in columns:
+        d_i = df[col].value_counts().to_dict()
+        for k in d_i:
+            d_i[k] = (d_i[k], d_i[k] / num_samples)
+        cnt_dict[col] = d_i
+
+    return cnt_dict
+
+
+def build_logger(work_dir, cfgname):
+    log_file = cfgname + ".log"
+    log_path = os.path.join(work_dir, log_file)
+
+    logger = logging.getLogger(cfgname)
+    logger.setLevel(logging.INFO)
+    # formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
+    formatter = logging.Formatter("%(asctime)s: %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
+
+    handler1 = logging.FileHandler(log_path)
+    handler1.setFormatter(formatter)
+
+    handler2 = logging.StreamHandler()
+    handler2.setFormatter(formatter)
+
+    logger.addHandler(handler1)
+    logger.addHandler(handler2)
+    logger.propagate = False
+
+    return logger
diff --git a/opensora/utils/train_utils.py b/opensora/utils/train_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f84604392b7536aaf35f27f6aec24970e782a62b
--- /dev/null
+++ b/opensora/utils/train_utils.py
@@ -0,0 +1,31 @@
+from collections import OrderedDict
+
+import torch
+
+
+@torch.no_grad()
+def update_ema(
+    ema_model: torch.nn.Module, model: torch.nn.Module, optimizer=None, decay: float = 0.9999, sharded: bool = True
+) -> None:
+    """
+    Step the EMA model towards the current model.
+    """
+    ema_params = OrderedDict(ema_model.named_parameters())
+    model_params = OrderedDict(model.named_parameters())
+
+    for name, param in model_params.items():
+        if name == "pos_embed":
+            continue
+        if not param.requires_grad:
+            continue
+        if not sharded:
+            param_data = param.data
+            ema_params[name].mul_(decay).add_(param_data, alpha=1 - decay)
+        else:
+            if param.data.dtype != torch.float32:
+                # low-precision working params: read the fp32 master copy
+                # kept by the ZeRO optimizer
+                param_id = id(param)
+                master_param = optimizer._param_store.working_to_master_param[param_id]
+                param_data = master_param.data
+            else:
+                param_data = param.data
+            ema_params[name].mul_(decay).add_(param_data, alpha=1 - decay)
diff --git a/scripts/inference.py b/scripts/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..900870be6aa4679bd6141c472ec83a245c7ab189
--- /dev/null
+++ b/scripts/inference.py
@@ -0,0 +1,112 @@
+import os
+
+import torch
+import colossalai
+import torch.distributed as dist
+from mmengine.runner import set_random_seed
+
+from opensora.datasets import save_sample
+from opensora.registry import MODELS, SCHEDULERS, build_module
+from opensora.utils.config_utils import parse_configs
+from opensora.utils.misc import to_torch_dtype
+from opensora.acceleration.parallel_states import set_sequence_parallel_group
+from colossalai.cluster import DistCoordinator
+
+
+def load_prompts(prompt_path):
+    with open(prompt_path, "r") as f:
+        prompts = [line.strip() for line in f.readlines()]
+    return prompts
+
+
+def main():
+    # ======================================================
+    # 1. cfg and init distributed env
+    # ======================================================
+    cfg = parse_configs(training=False)
+    print(cfg)
+
+    # init distributed
+    colossalai.launch_from_torch({})
+    coordinator = DistCoordinator()
+
+    if coordinator.world_size > 1:
+        set_sequence_parallel_group(dist.group.WORLD)
+        enable_sequence_parallelism = True
+    else:
+        enable_sequence_parallelism = False
+
+    # ======================================================
+    # 2.
runtime variables + # ====================================================== + torch.set_grad_enabled(False) + torch.backends.cuda.matmul.allow_tf32 = True + torch.backends.cudnn.allow_tf32 = True + device = "cuda" if torch.cuda.is_available() else "cpu" + dtype = to_torch_dtype(cfg.dtype) + set_random_seed(seed=cfg.seed) + prompts = load_prompts(cfg.prompt_path) + + # ====================================================== + # 3. build model & load weights + # ====================================================== + # 3.1. build model + input_size = (cfg.num_frames, *cfg.image_size) + vae = build_module(cfg.vae, MODELS) + latent_size = vae.get_latent_size(input_size) + text_encoder = build_module(cfg.text_encoder, MODELS, device=device) # T5 must be fp32 + model = build_module( + cfg.model, + MODELS, + input_size=latent_size, + in_channels=vae.out_channels, + caption_channels=text_encoder.output_dim, + model_max_length=text_encoder.model_max_length, + dtype=dtype, + enable_sequence_parallelism=enable_sequence_parallelism, + ) + text_encoder.y_embedder = model.y_embedder # hack for classifier-free guidance + + # 3.2. move to device & eval + vae = vae.to(device, dtype).eval() + model = model.to(device, dtype).eval() + + # 3.3. build scheduler + scheduler = build_module(cfg.scheduler, SCHEDULERS) + + # 3.4. support for multi-resolution + model_args = dict() + if cfg.multi_resolution: + image_size = cfg.image_size + hw = torch.tensor([image_size], device=device, dtype=dtype).repeat(cfg.batch_size, 1) + ar = torch.tensor([[image_size[0] / image_size[1]]], device=device, dtype=dtype).repeat(cfg.batch_size, 1) + model_args["data_info"] = dict(ar=ar, hw=hw) + + # ====================================================== + # 4. inference + # ====================================================== + sample_idx = 0 + save_dir = cfg.save_dir + os.makedirs(save_dir, exist_ok=True) + for i in range(0, len(prompts), cfg.batch_size): + batch_prompts = prompts[i : i + cfg.batch_size] + samples = scheduler.sample( + model, + text_encoder, + z_size=(vae.out_channels, *latent_size), + prompts=batch_prompts, + device=device, + additional_args=model_args, + ) + samples = vae.decode(samples.to(dtype)) + + if coordinator.is_master(): + for idx, sample in enumerate(samples): + print(f"Prompt: {batch_prompts[idx]}") + save_path = os.path.join(save_dir, f"sample_{sample_idx}") + save_sample(sample, fps=cfg.fps, save_path=save_path) + sample_idx += 1 + + +if __name__ == "__main__": + main() diff --git a/scripts/train.py b/scripts/train.py new file mode 100644 index 0000000000000000000000000000000000000000..9f611b7d3c07ef1a4af0678e245c8914d2d485fb --- /dev/null +++ b/scripts/train.py @@ -0,0 +1,287 @@ +from copy import deepcopy + +import colossalai +import torch +import torch.distributed as dist +import wandb +from colossalai.booster import Booster +from colossalai.booster.plugin import LowLevelZeroPlugin +from colossalai.cluster import DistCoordinator +from colossalai.nn.optimizer import HybridAdam +from colossalai.utils import get_current_device +from tqdm import tqdm + +from opensora.acceleration.checkpoint import set_grad_checkpoint +from opensora.acceleration.parallel_states import ( + get_data_parallel_group, + set_data_parallel_group, + set_sequence_parallel_group, +) +from opensora.acceleration.plugin import ZeroSeqParallelPlugin +from opensora.datasets import DatasetFromCSV, get_transforms_image, get_transforms_video, prepare_dataloader +from opensora.registry import MODELS, SCHEDULERS, build_module +from 
opensora.utils.ckpt_utils import create_logger, load, model_sharding, record_model_param_shape, save +from opensora.utils.config_utils import ( + create_experiment_workspace, + create_tensorboard_writer, + parse_configs, + save_training_config, +) +from opensora.utils.misc import all_reduce_mean, format_numel_str, get_model_numel, requires_grad, to_torch_dtype +from opensora.utils.train_utils import update_ema + + +def main(): + # ====================================================== + # 1. args & cfg + # ====================================================== + cfg = parse_configs(training=True) + print(cfg) + exp_name, exp_dir = create_experiment_workspace(cfg) + save_training_config(cfg._cfg_dict, exp_dir) + + # ====================================================== + # 2. runtime variables & colossalai launch + # ====================================================== + assert torch.cuda.is_available(), "Training currently requires at least one GPU." + assert cfg.dtype in ["fp16", "bf16"], f"Unknown mixed precision {cfg.dtype}" + + # 2.1. colossalai init distributed training + colossalai.launch_from_torch({}) + coordinator = DistCoordinator() + device = get_current_device() + dtype = to_torch_dtype(cfg.dtype) + + # 2.2. init logger, tensorboard & wandb + if not coordinator.is_master(): + logger = create_logger(None) + else: + logger = create_logger(exp_dir) + logger.info(f"Experiment directory created at {exp_dir}") + + writer = create_tensorboard_writer(exp_dir) + if cfg.wandb: + wandb.init(project="minisora", name=exp_name, config=cfg._cfg_dict) + + # 2.3. initialize ColossalAI booster + if cfg.plugin == "zero2": + plugin = LowLevelZeroPlugin( + stage=2, + precision=cfg.dtype, + initial_scale=2**16, + max_norm=cfg.grad_clip, + ) + set_data_parallel_group(dist.group.WORLD) + elif cfg.plugin == "zero2-seq": + plugin = ZeroSeqParallelPlugin( + sp_size=cfg.sp_size, + stage=2, + precision=cfg.dtype, + initial_scale=2**16, + max_norm=cfg.grad_clip, + ) + set_sequence_parallel_group(plugin.sp_group) + set_data_parallel_group(plugin.dp_group) + else: + raise ValueError(f"Unknown plugin {cfg.plugin}") + booster = Booster(plugin=plugin) + + # ====================================================== + # 3. build dataset and dataloader + # ====================================================== + dataset = DatasetFromCSV( + cfg.data_path, + # TODO: change transforms + transform=( + get_transforms_video(cfg.image_size[0]) + if not cfg.use_image_transform + else get_transforms_image(cfg.image_size[0]) + ), + num_frames=cfg.num_frames, + frame_interval=cfg.frame_interval, + root=cfg.root, + ) + + # TODO: use plugin's prepare dataloader + # a batch contains: + # { + # "video": torch.Tensor, # [B, C, T, H, W], + # "text": List[str], + # } + dataloader = prepare_dataloader( + dataset, + batch_size=cfg.batch_size, + num_workers=cfg.num_workers, + shuffle=True, + drop_last=True, + pin_memory=True, + process_group=get_data_parallel_group(), + ) + logger.info(f"Dataset contains {len(dataset):,} videos ({cfg.data_path})") + + total_batch_size = cfg.batch_size * dist.get_world_size() // cfg.sp_size + logger.info(f"Total batch size: {total_batch_size}") + + # ====================================================== + # 4. build model + # ====================================================== + # 4.1. 
build model + input_size = (cfg.num_frames, *cfg.image_size) + vae = build_module(cfg.vae, MODELS) + latent_size = vae.get_latent_size(input_size) + text_encoder = build_module(cfg.text_encoder, MODELS, device=device) # T5 must be fp32 + model = build_module( + cfg.model, + MODELS, + input_size=latent_size, + in_channels=vae.out_channels, + caption_channels=text_encoder.output_dim, + model_max_length=text_encoder.model_max_length, + dtype=dtype, + ) + model_numel, model_numel_trainable = get_model_numel(model) + logger.info( + f"Trainable model params: {format_numel_str(model_numel_trainable)}, Total model params: {format_numel_str(model_numel)}" + ) + + # 4.2. create ema + ema = deepcopy(model).to(torch.float32).to(device) + requires_grad(ema, False) + ema_shape_dict = record_model_param_shape(ema) + + # 4.3. move to device + vae = vae.to(device, dtype) + model = model.to(device, dtype) + + # 4.4. build scheduler + scheduler = build_module(cfg.scheduler, SCHEDULERS) + + # 4.5. setup optimizer + optimizer = HybridAdam( + filter(lambda p: p.requires_grad, model.parameters()), lr=cfg.lr, weight_decay=0, adamw_mode=True + ) + lr_scheduler = None + + # 4.6. prepare for training + if cfg.grad_checkpoint: + set_grad_checkpoint(model) + model.train() + update_ema(ema, model, decay=0, sharded=False) + ema.eval() + + # ======================================================= + # 5. boost model for distributed training with colossalai + # ======================================================= + torch.set_default_dtype(dtype) + model, optimizer, _, dataloader, lr_scheduler = booster.boost( + model=model, optimizer=optimizer, lr_scheduler=lr_scheduler, dataloader=dataloader + ) + torch.set_default_dtype(torch.float) + num_steps_per_epoch = len(dataloader) + logger.info("Boost model for distributed training") + + # ======================================================= + # 6. training loop + # ======================================================= + start_epoch = start_step = log_step = sampler_start_idx = 0 + running_loss = 0.0 + + # 6.1. resume training + if cfg.load is not None: + logger.info("Loading checkpoint") + start_epoch, start_step, sampler_start_idx = load(booster, model, ema, optimizer, lr_scheduler, cfg.load) + logger.info(f"Loaded checkpoint {cfg.load} at epoch {start_epoch} step {start_step}") + logger.info(f"Training for {cfg.epochs} epochs with {num_steps_per_epoch} steps per epoch") + + dataloader.sampler.set_start_index(sampler_start_idx) + model_sharding(ema) + + # 6.2. 
training loop + for epoch in range(start_epoch, cfg.epochs): + dataloader.sampler.set_epoch(epoch) + dataloader_iter = iter(dataloader) + logger.info(f"Beginning epoch {epoch}...") + + with tqdm( + range(start_step, num_steps_per_epoch), + desc=f"Epoch {epoch}", + disable=not coordinator.is_master(), + total=num_steps_per_epoch, + initial=start_step, + ) as pbar: + for step in pbar: + batch = next(dataloader_iter) + x = batch["video"].to(device, dtype) # [B, C, T, H, W] + y = batch["text"] + + with torch.no_grad(): + # Prepare visual inputs + x = vae.encode(x) # [B, C, T, H/P, W/P] + # Prepare text inputs + model_args = text_encoder.encode(y) + + # Diffusion + t = torch.randint(0, scheduler.num_timesteps, (x.shape[0],), device=device) + loss_dict = scheduler.training_losses(model, x, t, model_args) + + # Backward & update + loss = loss_dict["loss"].mean() + booster.backward(loss=loss, optimizer=optimizer) + optimizer.step() + optimizer.zero_grad() + + # Update EMA + update_ema(ema, model.module, optimizer=optimizer) + + # Log loss values: + all_reduce_mean(loss) + running_loss += loss.item() + global_step = epoch * num_steps_per_epoch + step + log_step += 1 + + # Log to tensorboard + if coordinator.is_master() and (global_step + 1) % cfg.log_every == 0: + avg_loss = running_loss / log_step + pbar.set_postfix({"loss": avg_loss, "step": step, "global_step": global_step}) + running_loss = 0 + log_step = 0 + writer.add_scalar("loss", loss.item(), global_step) + if cfg.wandb: + wandb.log( + { + "iter": global_step, + "num_samples": global_step * total_batch_size, + "epoch": epoch, + "loss": loss.item(), + "avg_loss": avg_loss, + }, + step=global_step, + ) + + # Save checkpoint + if cfg.ckpt_every > 0 and (global_step + 1) % cfg.ckpt_every == 0: + save( + booster, + model, + ema, + optimizer, + lr_scheduler, + epoch, + step + 1, + global_step + 1, + cfg.batch_size, + coordinator, + exp_dir, + ema_shape_dict, + ) + logger.info( + f"Saved checkpoint at epoch {epoch} step {step + 1} global_step {global_step + 1} to {exp_dir}" + ) + + # the continue epochs are not resumed, so we need to reset the sampler start index and start step + dataloader.sampler.set_start_index(0) + start_step = 0 + + +if __name__ == "__main__": + main() diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..45049bbaeda5c2c7fd79e99d16683173dca153dd --- /dev/null +++ b/setup.py @@ -0,0 +1,60 @@ +from typing import List + +from setuptools import find_packages, setup + + +def fetch_requirements(path) -> List[str]: + """ + This function reads the requirements file. + + Args: + path (str): the path to the requirements file. + + Returns: + The lines in the requirements file. + """ + with open(path, "r") as fd: + return [r.strip() for r in fd.readlines()] + + +def fetch_readme() -> str: + """ + This function reads the README.md file in the current directory. + + Returns: + The lines in the README file. 
+ """ + with open("README.md", encoding="utf-8") as f: + return f.read() + + +setup( + name="opensora", + version="1.0.0", + packages=find_packages( + exclude=( + "assets", + "configs", + "docs", + "outputs", + "pretrained_models", + "scripts", + "tests", + "tools", + "*.egg-info", + ) + ), + description="Democratizing Efficient Video Production for All", + long_description=fetch_readme(), + long_description_content_type="text/markdown", + license="Apache Software License 2.0", + install_requires=fetch_requirements("requirements.txt"), + python_requires=">=3.6", + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: Apache Software License", + "Environment :: GPU :: NVIDIA CUDA", + "Topic :: Scientific/Engineering :: Artificial Intelligence", + "Topic :: System :: Distributed Computing", + ], +) diff --git a/tests/test_seq_parallel_attention.py b/tests/test_seq_parallel_attention.py new file mode 100644 index 0000000000000000000000000000000000000000..00966ad013fb330d0e3a767013a8c0e9cfb6b9d4 --- /dev/null +++ b/tests/test_seq_parallel_attention.py @@ -0,0 +1,149 @@ +import colossalai +import torch +import torch.distributed as dist +from colossalai.testing import spawn + +from opensora.acceleration.communications import gather_forward_split_backward, split_forward_gather_backward +from opensora.acceleration.parallel_states import set_sequence_parallel_group +from opensora.models.layers.blocks import ( + Attention, + MultiHeadCrossAttention, + SeqParallelAttention, + SeqParallelMultiHeadCrossAttention, +) + + +def run_attention(rank, world_size): + # create model + torch.manual_seed(1024) + set_sequence_parallel_group(dist.group.WORLD) + + seq_parallel_attention = SeqParallelAttention(dim=256, num_heads=4, qkv_bias=True, enable_flashattn=False).cuda() + + torch.manual_seed(1024) + attention = Attention( + dim=256, + num_heads=4, + qkv_bias=True, + enable_flashattn=False, + ).cuda() + + # create inputs + torch.manual_seed(1024) + x = torch.randn(4, 64, 256).cuda() + seq_x = x.clone().detach() + + x.requires_grad = True + x.retain_grad() + seq_x.requires_grad = True + seq_x.retain_grad() + + sub_seq_x = split_forward_gather_backward(seq_x, dist.group.WORLD, dim=1, grad_scale="down") + + # run model + out = attention(x) + sub_seq_out = seq_parallel_attention(sub_seq_x) + seq_out = gather_forward_split_backward(sub_seq_out, dist.group.WORLD, dim=1, grad_scale="up") + + assert torch.allclose(seq_out, out, atol=1e-7), f"{seq_out}\nvs\n{out}" + + # run backward + seq_out.mean().backward() + out.mean().backward() + + # all reduce gradient for sp + for p in seq_parallel_attention.parameters(): + if p.grad is not None: + dist.all_reduce(p.grad, group=dist.group.WORLD) + p.grad.div_(world_size) + + # check grad + for p1, p2 in zip(seq_parallel_attention.parameters(), attention.parameters()): + assert torch.allclose(p1.grad, p2.grad, atol=1e-7), f"{p1.grad}\nvs\n{p2.grad}" + + # check input grad + assert torch.allclose(x.grad, seq_x.grad, atol=1e-7), f"{x.grad}\nvs\n{seq_x.grad}" + + +def run_cross_attention(rank, world_size): + # create model + torch.manual_seed(1024) + set_sequence_parallel_group(dist.group.WORLD) + seq_parallel_attention = SeqParallelMultiHeadCrossAttention( + d_model=256, + num_heads=4, + ).cuda().to(torch.bfloat16) + + torch.manual_seed(1024) + attention = MultiHeadCrossAttention( + d_model=256, + num_heads=4, + ).cuda().to(torch.bfloat16) + + # make sure the weights are the same + for p1, p2 in zip(seq_parallel_attention.parameters(), 
attention.parameters()): + p1.data.copy_(p2.data) + + # create inputs + torch.manual_seed(1024) + x = torch.randn(4, 64, 256).cuda().to(torch.bfloat16) + y = torch.randn(4, 32, 256).cuda().to(torch.bfloat16) + + mask = [2, 10, 8, 16] + mask = None + seq_x = x.clone().detach() + seq_y = y.clone().detach() + + # set grad + x.requires_grad = True + x.retain_grad() + seq_x.requires_grad = True + seq_x.retain_grad() + y.requires_grad = True + y.retain_grad() + seq_y.requires_grad = True + seq_y.retain_grad() + + # split by sequence + sub_seq_x = split_forward_gather_backward(seq_x, dist.group.WORLD, dim=1, grad_scale="down") + + # run model + out = attention(x, y, mask) + sub_seq_out = seq_parallel_attention(sub_seq_x, seq_y, mask) + seq_out = gather_forward_split_backward(sub_seq_out, dist.group.WORLD, dim=1, grad_scale="up") + + assert torch.allclose(seq_out, out, rtol=1e-5, atol=1e-6), f"\n{seq_out}\nvs\n{out}" + + # run backward + seq_out.mean().backward() + out.mean().backward() + + # all reduce gradient for sp + for name, p in seq_parallel_attention.named_parameters(): + if p.grad is not None: + dist.all_reduce(p.grad, group=dist.group.WORLD) + p.grad.div_(world_size) + else: + print(f"grad of {name} is None") + + # # check grad + for p1, p2 in zip(seq_parallel_attention.named_parameters(), attention.named_parameters()): + assert torch.allclose(p1[1].grad, p2[1].grad, rtol=1e-3, atol=1e-4), f"\n{p1[0]}\nvs\n{p2[0]}:\n{p1[1].grad}\nvs\n{p2[1].grad}" + + # # check input grad + assert torch.allclose(x.grad, seq_x.grad, atol=1e-7), f"{x.grad}\nvs\n{seq_x.grad}" + assert torch.allclose(y.grad, seq_y.grad, atol=1e-7), f"{y.grad}\nvs\n{seq_y.grad}" + + +def run_dist(rank, world_size, port): + colossalai.launch({}, rank=rank, world_size=world_size, host="localhost", port=port) + # run_attention(rank, world_size) + run_cross_attention(rank, world_size) + + +def test_seq_parallel_attention(): + spawn(run_dist, nprocs=2) + + +if __name__ == "__main__": + test_seq_parallel_attention() diff --git a/tests/test_t5_shardformer.py b/tests/test_t5_shardformer.py new file mode 100644 index 0000000000000000000000000000000000000000..68040ab39e57d7b8508e7eb4c2d330d7492f30ea --- /dev/null +++ b/tests/test_t5_shardformer.py @@ -0,0 +1,71 @@ +import time +from copy import deepcopy + +import colossalai +import torch +from colossalai.shardformer import ShardConfig, ShardFormer +from colossalai.testing import spawn + +from opensora.acceleration.shardformer.policy.t5_encoder import T5EncoderPolicy +from opensora.models.text_encoder.t5 import T5Embedder + + +def run_t5_encoder(rank, world_size, port): + colossalai.launch({}, rank=rank, world_size=world_size, port=port, host="localhost") + + # t5 embedder + t5_path = "./pretrained_models/t5_ckpts" + hf_t5 = T5Embedder(device="cuda", local_cache=True, cache_dir=t5_path, torch_dtype=torch.float) + sf_t5 = deepcopy(hf_t5) + + # create huggingface model as normal + shard_config = ShardConfig( + tensor_parallel_process_group=None, + pipeline_stage_manager=None, + enable_tensor_parallelism=False, + enable_fused_normalization=False, + enable_flash_attention=False, + enable_jit_fused=True, + enable_sequence_parallelism=False, + enable_sequence_overlap=False, + ) + shard_former = ShardFormer(shard_config=shard_config) + sharded_model, _ = shard_former.optimize(sf_t5.model, policy=T5EncoderPolicy()) + sf_t5.model = sharded_model + + # test t5 embedder + texts = ["Who is the best player in the history of NBA?", "How to study computer science?"] + for i in range(5): + hf_embs, 
hf_masks = hf_t5.get_text_embeddings(texts)
+        sf_embs, sf_masks = sf_t5.get_text_embeddings(texts)
+
+        # check accuracy
+        assert torch.allclose(hf_embs, sf_embs, rtol=1e-4, atol=1e-5), f"{hf_embs} \nvs\n{sf_embs}"
+        assert torch.allclose(hf_masks, sf_masks), f"{hf_masks} \nvs\n{sf_masks}"
+
+    # measure perf of the native huggingface model
+    torch.cuda.synchronize()
+    hf_start = time.time()
+    for i in range(20):
+        hf_embs, hf_masks = hf_t5.get_text_embeddings(texts)
+    torch.cuda.synchronize()
+    hf_end = time.time()
+
+    # convert the sharded model to fp16 and measure its perf
+    sf_t5.model = sf_t5.model.half()
+    torch.cuda.synchronize()
+    sf_start = time.time()
+    for i in range(20):
+        sf_embs, sf_masks = sf_t5.get_text_embeddings(texts)
+    torch.cuda.synchronize()
+    sf_end = time.time()
+
+    print(f"[Performance] native: {hf_end - hf_start} s, shardformer: {sf_end - sf_start} s")
+
+
+def test_t5_encoder():
+    spawn(run_t5_encoder)
+
+
+if __name__ == "__main__":
+    test_t5_encoder()
diff --git a/tools/__init__.py b/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tools/caption/README.md b/tools/caption/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e9289d0550d49fea9b5a0e6e3ce12f3501e78714
--- /dev/null
+++ b/tools/caption/README.md
@@ -0,0 +1,25 @@
+# Video Captioning
+
+Human labeling of videos is expensive and time-consuming. We therefore adopt powerful image captioning models to generate captions for videos. Although GPT-4V produces better captions, its speed of roughly 20 s/sample is too slow for us. With batch inference, LLaVA reaches about 3 s/sample with comparable quality. LLaVA is the second-best open-source model on [MMMU](https://mmmu-benchmark.github.io/) and accepts inputs of any resolution.
+
+![Caption](https://i0.imgs.ovh/2024/03/16/eXdvC.png)
+
+## GPT-4V Captioning
+
+Run the following command to generate captions for videos with GPT-4V:
+
+```bash
+python -m tools.caption.caption_gpt4 FOLDER_WITH_VIDEOS output.csv --key $OPENAI_API_KEY
+```
+
+The cost is approximately $0.01 per video (3 frames per video). The output is a CSV file containing the video path, caption, and frame count.
+
+## LLaVA Captioning
+
+First, install LLaVA according to their [official instructions](https://github.com/haotian-liu/LLaVA?tab=readme-ov-file#install). We use the `liuhaotian/llava-v1.6-34b` model for captioning, which can be downloaded [here](https://huggingface.co/liuhaotian/llava-v1.6-vicuna-7b). Then, run the following command to generate captions for videos with LLaVA:
+
+```bash
+CUDA_VISIBLE_DEVICES=0,1 python -m tools.caption.caption_llava samples output.csv
+```
+
+The Yi-34B-based model requires two 80GB GPUs and runs at about 3 s/sample. The output is a CSV file containing the video path, caption, and frame count.
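+
+A sample output file, with hypothetical file names and captions, might look like this (both scripts write rows via `csv.writer`, so captions containing commas are quoted automatically):
+
+```csv
+video0001.mp4,A chef slices vegetables on a wooden board in a sunlit kitchen.,240
+video0002.mp4,"Waves roll onto a sandy beach at sunset, while seagulls pass overhead.",187
+```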
diff --git a/tools/caption/__init__.py b/tools/caption/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/tools/caption/caption_gpt4.py b/tools/caption/caption_gpt4.py new file mode 100644 index 0000000000000000000000000000000000000000..b2c7590f15b05993d463defc9615c401090ed953 --- /dev/null +++ b/tools/caption/caption_gpt4.py @@ -0,0 +1,69 @@ +import argparse +import csv +import os + +import requests +import tqdm + +from .utils import extract_frames, prompts, read_video_list + + +def get_caption(frame, prompt, api_key): + headers = {"Content-Type": "application/json", "Authorization": f"Bearer {api_key}"} + payload = { + "model": "gpt-4-vision-preview", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": prompt, + }, + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame[0]}"}}, + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame[1]}"}}, + {"type": "image_url", "image_url": {"url": f"data:image/jpeg;base64,{frame[2]}"}}, + ], + } + ], + "max_tokens": 300, + } + response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload, timeout=60) + caption = response.json()["choices"][0]["message"]["content"] + caption = caption.replace("\n", " ") + return caption + + +def main(args): + # ====================================================== + # 1. read video list + # ====================================================== + videos = read_video_list(args.video_folder, args.output_file) + f = open(args.output_file, "a") + writer = csv.writer(f) + + # ====================================================== + # 2. generate captions + # ====================================================== + for video in tqdm.tqdm(videos): + video_path = os.path.join(args.video_folder, video) + frame, length = extract_frames(video_path, base_64=True) + if len(frame) < 3: + continue + + prompt = prompts[args.prompt] + caption = get_caption(frame, prompt, args.key) + + writer.writerow((video, caption, length)) + f.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("video_folder", type=str) + parser.add_argument("output_file", type=str) + parser.add_argument("--prompt", type=str, default="three_frames") + parser.add_argument("--key", type=str) + args = parser.parse_args() + + main(args) diff --git a/tools/caption/caption_llava.py b/tools/caption/caption_llava.py new file mode 100644 index 0000000000000000000000000000000000000000..8f4278c3bd7089b115123b41e53856e595156080 --- /dev/null +++ b/tools/caption/caption_llava.py @@ -0,0 +1,352 @@ +import argparse +import csv +import os +import warnings + +import torch +from llava.constants import DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX +from llava.conversation import conv_templates +from llava.mm_utils import get_anyres_image_grid_shape, get_model_name_from_path, process_images, tokenizer_image_token +from llava.model.builder import load_pretrained_model +from llava.model.llava_arch import unpad_image +from llava.utils import disable_torch_init +from tqdm import tqdm + +from .utils import extract_frames, prompts, read_video_list + +disable_torch_init() + + +def prepare_inputs_labels_for_multimodal( + self, input_ids, position_ids, attention_mask, past_key_values, labels, images, image_sizes=None +): + # llava_arch.py + vision_tower = self.get_vision_tower() + if vision_tower is None or images is None or input_ids.shape[1] == 
1: + return input_ids, position_ids, attention_mask, past_key_values, None, labels + + if type(images) is list or images.ndim == 5: + if type(images) is list: + images = [x.unsqueeze(0) if x.ndim == 3 else x for x in images] + concat_images = torch.cat([image for image in images], dim=0) + image_features = self.encode_images(concat_images) + split_sizes = [image.shape[0] for image in images] + image_features = torch.split(image_features, split_sizes, dim=0) + mm_patch_merge_type = getattr(self.config, "mm_patch_merge_type", "flat") + image_aspect_ratio = getattr(self.config, "image_aspect_ratio", "square") + if mm_patch_merge_type == "flat": + image_features = [x.flatten(0, 1) for x in image_features] + elif mm_patch_merge_type.startswith("spatial"): + new_image_features = [] + for image_idx, image_feature in enumerate(image_features): + if image_feature.shape[0] > 1: + base_image_feature = image_feature[0] + image_feature = image_feature[1:] + height = width = self.get_vision_tower().num_patches_per_side + assert height * width == base_image_feature.shape[0] + if image_aspect_ratio == "anyres": + num_patch_width, num_patch_height = get_anyres_image_grid_shape( + image_sizes[image_idx], + self.config.image_grid_pinpoints, + self.get_vision_tower().config.image_size, + ) + image_feature = image_feature.view(num_patch_height, num_patch_width, height, width, -1) + else: + raise NotImplementedError + if "unpad" in mm_patch_merge_type: + image_feature = image_feature.permute(4, 0, 2, 1, 3).contiguous() + image_feature = image_feature.flatten(1, 2).flatten(2, 3) + image_feature = unpad_image(image_feature, image_sizes[image_idx]) + image_feature = torch.cat( + ( + image_feature, + self.model.image_newline[:, None, None] + .expand(*image_feature.shape[:-1], 1) + .to(image_feature.device), + ), + dim=-1, + ) + image_feature = image_feature.flatten(1, 2).transpose(0, 1) + else: + image_feature = image_feature.permute(0, 2, 1, 3, 4).contiguous() + image_feature = image_feature.flatten(0, 3) + image_feature = torch.cat((base_image_feature, image_feature), dim=0) + else: + image_feature = image_feature[0] + if "unpad" in mm_patch_merge_type: + image_feature = torch.cat( + (image_feature, self.model.image_newline[None].to(image_feature.device)), dim=0 + ) + new_image_features.append(image_feature) + image_features = new_image_features + else: + raise ValueError(f"Unexpected mm_patch_merge_type: {self.config.mm_patch_merge_type}") + else: + image_features = self.encode_images(images) + + # TODO: image start / end is not implemented here to support pretraining. + if getattr(self.config, "tune_mm_mlp_adapter", False) and getattr(self.config, "mm_use_im_start_end", False): + raise NotImplementedError + + # Let's just add dummy tensors if they do not exist, + # it is a headache to deal with None all the time. + # But it is not ideal, and if you have a better idea, + # please open an issue / submit a PR, thanks. 
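+    # Keep references to the original (possibly None) labels/position_ids/attention_mask
+    # so that None can be restored at the very end. The code below fills in defaults,
+    # strips padding tokens, and then splices the image features into the embedding
+    # sequence at every IMAGE_TOKEN_INDEX placeholder.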
+ _labels = labels + _position_ids = position_ids + _attention_mask = attention_mask + if attention_mask is None: + attention_mask = torch.ones_like(input_ids, dtype=torch.bool) + else: + attention_mask = attention_mask.bool() + if position_ids is None: + position_ids = torch.arange(0, input_ids.shape[1], dtype=torch.long, device=input_ids.device) + if labels is None: + labels = torch.full_like(input_ids, IGNORE_INDEX) + + # remove the padding using attention_mask -- FIXME + input_ids = [ + cur_input_ids[cur_attention_mask] for cur_input_ids, cur_attention_mask in zip(input_ids, attention_mask) + ] + labels = [cur_labels[cur_attention_mask] for cur_labels, cur_attention_mask in zip(labels, attention_mask)] + + new_input_embeds = [] + new_labels = [] + cur_image_idx = 0 + for batch_idx, cur_input_ids in enumerate(input_ids): + num_images = (cur_input_ids == IMAGE_TOKEN_INDEX).sum() + if num_images == 0: + cur_image_features = image_features[cur_image_idx] + cur_input_embeds_1 = self.get_model().embed_tokens(cur_input_ids) + cur_input_embeds = torch.cat([cur_input_embeds_1, cur_image_features[0:0]], dim=0) + new_input_embeds.append(cur_input_embeds) + new_labels.append(labels[batch_idx]) + cur_image_idx += 1 + continue + + image_token_indices = ( + [-1] + torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0].tolist() + [cur_input_ids.shape[0]] + ) + cur_input_ids_noim = [] + cur_labels = labels[batch_idx] + cur_labels_noim = [] + for i in range(len(image_token_indices) - 1): + cur_input_ids_noim.append(cur_input_ids[image_token_indices[i] + 1 : image_token_indices[i + 1]]) + cur_labels_noim.append(cur_labels[image_token_indices[i] + 1 : image_token_indices[i + 1]]) + split_sizes = [x.shape[0] for x in cur_labels_noim] + cur_input_embeds = self.get_model().embed_tokens(torch.cat(cur_input_ids_noim)) + cur_input_embeds_no_im = torch.split(cur_input_embeds, split_sizes, dim=0) + cur_new_input_embeds = [] + cur_new_labels = [] + + for i in range(num_images + 1): + cur_new_input_embeds.append(cur_input_embeds_no_im[i]) + cur_new_labels.append(cur_labels_noim[i]) + if i < num_images: + cur_image_features = image_features[cur_image_idx] + cur_image_idx += 1 + cur_new_input_embeds.append(cur_image_features) + cur_new_labels.append( + torch.full( + (cur_image_features.shape[0],), + IGNORE_INDEX, + device=cur_labels.device, + dtype=cur_labels.dtype, + ) + ) + + cur_new_input_embeds = [x.to(self.device) for x in cur_new_input_embeds] + + cur_new_input_embeds = torch.cat(cur_new_input_embeds) + cur_new_labels = torch.cat(cur_new_labels) + + new_input_embeds.append(cur_new_input_embeds) + new_labels.append(cur_new_labels) + + # Truncate sequences to max length as image embeddings can make the sequence longer + tokenizer_model_max_length = getattr(self.config, "tokenizer_model_max_length", None) + if tokenizer_model_max_length is not None: + new_input_embeds = [x[:tokenizer_model_max_length] for x in new_input_embeds] + new_labels = [x[:tokenizer_model_max_length] for x in new_labels] + + # Combine them + max_len = max(x.shape[0] for x in new_input_embeds) + batch_size = len(new_input_embeds) + + new_input_embeds_padded = [] + new_labels_padded = torch.full( + (batch_size, max_len), IGNORE_INDEX, dtype=new_labels[0].dtype, device=new_labels[0].device + ) + attention_mask = torch.zeros((batch_size, max_len), dtype=attention_mask.dtype, device=attention_mask.device) + position_ids = torch.zeros((batch_size, max_len), dtype=position_ids.dtype, device=position_ids.device) + + for i, (cur_new_embed, 
cur_new_labels) in enumerate(zip(new_input_embeds, new_labels)): + cur_len = cur_new_embed.shape[0] + if getattr(self.config, "tokenizer_padding_side", "right") == "left": + new_input_embeds_padded.append( + torch.cat( + ( + torch.zeros( + (max_len - cur_len, cur_new_embed.shape[1]), + dtype=cur_new_embed.dtype, + device=cur_new_embed.device, + ), + cur_new_embed, + ), + dim=0, + ) + ) + if cur_len > 0: + new_labels_padded[i, -cur_len:] = cur_new_labels + attention_mask[i, -cur_len:] = True + position_ids[i, -cur_len:] = torch.arange( + 0, cur_len, dtype=position_ids.dtype, device=position_ids.device + ) + else: + new_input_embeds_padded.append( + torch.cat( + ( + cur_new_embed, + torch.zeros( + (max_len - cur_len, cur_new_embed.shape[1]), + dtype=cur_new_embed.dtype, + device=cur_new_embed.device, + ), + ), + dim=0, + ) + ) + if cur_len > 0: + new_labels_padded[i, :cur_len] = cur_new_labels + attention_mask[i, :cur_len] = True + position_ids[i, :cur_len] = torch.arange( + 0, cur_len, dtype=position_ids.dtype, device=position_ids.device + ) + + new_input_embeds = torch.stack(new_input_embeds_padded, dim=0) + + if _labels is None: + new_labels = None + else: + new_labels = new_labels_padded + + if _attention_mask is None: + attention_mask = None + else: + attention_mask = attention_mask.to(dtype=_attention_mask.dtype) + + if _position_ids is None: + position_ids = None + + return None, position_ids, attention_mask, past_key_values, new_input_embeds, new_labels + + +@torch.inference_mode() +def main(args): + # ====================================================== + # 1. read video list + # ====================================================== + videos = read_video_list(args.video_folder, args.output_file) + f = open(args.output_file, "a") + writer = csv.writer(f) + + # ====================================================== + # 2. load model and prepare prompts + # ====================================================== + model_path = "liuhaotian/llava-v1.6-34b" + query = prompts[args.prompt] + print(f"Prompt: {query}") + conv = conv_templates["chatml_direct"].copy() + conv.append_message(conv.roles[0], DEFAULT_IMAGE_TOKEN + "\n" + query) + prompt = conv.get_prompt() + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") # Pytorch non-meta copying warning fills out the console + tokenizer, model, image_processor, context_len = load_pretrained_model( + model_path=model_path, + model_base=None, + model_name=get_model_name_from_path(model_path), + ) + input_ids = tokenizer_image_token(prompt, tokenizer, IMAGE_TOKEN_INDEX, return_tensors="pt") + input_ids = input_ids.unsqueeze(0).to(model.device) + + # ====================================================== + # 3. 
generate captions + # ====================================================== + bs = args.bs + for i in tqdm(range(0, len(videos), bs)): + # prepare a batch of inputs + video_files = videos[i : i + bs] + frames = [] + video_lengths = [] + for video_file in video_files: + frame, length = extract_frames(os.path.join(args.video_folder, video_file)) + if len(frame) < 3: + continue + frames.append(frame) + video_lengths.append(length) + if len(frames) == 0: + continue + + # encode the batch of inputs + samples = [] + for imgs in frames: + imgs_size = [img.size for img in imgs] + imgs = process_images(imgs, image_processor, model.config) + imgs = imgs.to(model.device, dtype=torch.float16) + with torch.inference_mode(): + _, _, _, _, inputs_embeds, _ = prepare_inputs_labels_for_multimodal( + model, input_ids, None, None, None, None, images=imgs, image_sizes=imgs_size + ) + samples.append(inputs_embeds) + + # padding + max_len = max([sample.shape[1] for sample in samples]) + attention_mask = torch.tensor( + [[0] * (max_len - samples[i].shape[1]) + [1] * samples[i].shape[1] for i in range(len(samples))] + ).to(model.device) + inputs_embeds = [ + torch.cat( + [ + torch.zeros( + (1, max_len - samples[i].shape[1], samples[i].shape[-1]), + device=model.device, + dtype=torch.float16, + ), + samples[i], + ], + dim=1, + ) + for i in range(len(samples)) + ] + inputs_embeds = torch.cat(inputs_embeds, dim=0) + + # generate outputs + output_ids = super(type(model), model).generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + do_sample=True, + temperature=0.2, + max_new_tokens=512, + use_cache=True, + ) + outputs = tokenizer.batch_decode(output_ids, skip_special_tokens=True) + outputs = [output.replace("\n", " ").strip() for output in outputs] + + # save results + result = list(zip(video_files, outputs, video_lengths)) + for t in result: + writer.writerow(t) + + f.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("video_folder", type=str) + parser.add_argument("output_file", type=str) + parser.add_argument("--bs", type=int, default=32) + parser.add_argument("--prompt", type=str, default="three_frames") + args = parser.parse_args() + + main(args) diff --git a/tools/caption/utils.py b/tools/caption/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3912f0ccd1811fae507d1da2169190324dc20f30 --- /dev/null +++ b/tools/caption/utils.py @@ -0,0 +1,67 @@ +import base64 +import csv +import os + +import cv2 +from PIL import Image + +prompts = { + "naive": "Describe the video", + "three_frames": "A video is given by providing three frames in chronological order. Describe this video and its style to generate a description. Pay attention to all objects in the video. Do not describe each frame individually. Do not reply with words like 'first frame'. The description should be useful for AI to re-generate the video. The description should be less than six sentences. Here are some examples of good descriptions: 1. A stylish woman walks down a Tokyo street filled with warm glowing neon and animated city signage. She wears a black leather jacket, a long red dress, and black boots, and carries a black purse. She wears sunglasses and red lipstick. She walks confidently and casually. The street is damp and reflective, creating a mirror effect of the colorful lights. Many pedestrians walk about. 2. 
Several giant wooly mammoths approach treading through a snowy meadow, their long wooly fur lightly blows in the wind as they walk, snow covered trees and dramatic snow capped mountains in the distance, mid afternoon light with wispy clouds and a sun high in the distance creates a warm glow, the low camera view is stunning capturing the large furry mammal with beautiful photography, depth of field. 3. Drone view of waves crashing against the rugged cliffs along Big Sur's garay point beach. The crashing blue waters create white-tipped waves, while the golden light of the setting sun illuminates the rocky shore. A small island with a lighthouse sits in the distance, and green shrubbery covers the cliff's edge. The steep drop from the road down to the beach is a dramatic feat, with the cliff’s edges jutting out over the sea. This is a view that captures the raw beauty of the coast and the rugged landscape of the Pacific Coast Highway.", +} + + +def get_filelist(file_path): + Filelist = [] + VID_EXTENSIONS = ("mp4", "avi", "mov", "mkv") + for home, dirs, files in os.walk(file_path): + for filename in files: + ext = filename.split(".")[-1] + if ext in VID_EXTENSIONS: + Filelist.append(filename) + return Filelist + + +def get_video_length(cap): + return int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + +def encode_image(image_path): + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") + + +def extract_frames(video_path, points=(0.2, 0.5, 0.8), base_64=False): + cap = cv2.VideoCapture(video_path) + length = get_video_length(cap) + points = [int(length * point) for point in points] + frames = [] + if length < 3: + return frames, length + for point in points: + cap.set(cv2.CAP_PROP_POS_FRAMES, point) + ret, frame = cap.read() + if not base_64: + frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + frame = Image.fromarray(frame) + else: + _, buffer = cv2.imencode(".jpg", frame) + frame = base64.b64encode(buffer).decode("utf-8") + frames.append(frame) + return frames, length + + +def read_video_list(video_folder, output_file): + processed_videos = [] + if os.path.exists(output_file): + with open(output_file, "r") as f: + reader = csv.reader(f) + samples = list(reader) + processed_videos = [sample[0] for sample in samples] + + # read video list + videos = get_filelist(video_folder) + print(f"Dataset contains {len(videos)} videos.") + videos = [video for video in videos if video not in processed_videos] + print(f"Processing {len(videos)} new videos.") + return videos diff --git a/tools/datasets/README.md b/tools/datasets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0118c8f180a25b43f3de0bd1b22f0ce8afd9ca84 --- /dev/null +++ b/tools/datasets/README.md @@ -0,0 +1,48 @@ +# Dataset Download and Management + +## Dataset Format + +The training data should be provided in a CSV file with the following format: + +```csv +/absolute/path/to/image1.jpg, caption1, num_of_frames +/absolute/path/to/image2.jpg, caption2, num_of_frames +``` + +## HD-VG-130M + +This dataset comprises 130M text-video pairs. You can download the dataset and prepare it for training according to [the dataset repository's instructions](https://github.com/daooshee/HD-VG-130M). There is a README.md file in the Google Drive link that provides instructions on how to download and cut the videos. For this version, we directly use the dataset provided by the authors. + +## Demo Dataset + +You can use ImageNet and UCF101 for a quick demo. 
After downloading the datasets, you can use the following commands to prepare the CSV files:
+
+```bash
+# ImageNet
+python -m tools.datasets.convert_dataset imagenet IMAGENET_FOLDER --split train
+# UCF101
+python -m tools.datasets.convert_dataset ucf101 UCF101_FOLDER --split videos
+```
+
+## Manage Datasets
+
+We provide `csvutil.py` to manage the CSV files. You can use the following commands to process them:
+
+```bash
+# generate DATA_fmin_128_fmax_256.csv with frames between 128 and 256
+python -m tools.datasets.csvutil DATA.csv --fmin 128 --fmax 256
+# generate DATA_root.csv with absolute paths
+python -m tools.datasets.csvutil DATA.csv --root /absolute/path/to/dataset
+# remove videos with no captions
+python -m tools.datasets.csvutil DATA.csv --remove-empty-caption
+# compute the number of frames for each video
+python -m tools.datasets.csvutil DATA.csv --relength
+# remove caption prefixes such as "The video shows"
+python -m tools.datasets.csvutil DATA.csv --remove-caption-prefix
+```
+
+To merge multiple CSV files, you can use the following command:
+
+```bash
+cat *.csv > combined.csv
+```
diff --git a/tools/datasets/__init__.py b/tools/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tools/datasets/convert_dataset.py b/tools/datasets/convert_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ff904fc20cbe58e696c9d606fbfb871561ed45c
--- /dev/null
+++ b/tools/datasets/convert_dataset.py
@@ -0,0 +1,66 @@
+import argparse
+import csv
+import os
+
+from torchvision.datasets import ImageNet
+
+
+def get_filelist(file_path):
+    Filelist = []
+    for home, dirs, files in os.walk(file_path):
+        for filename in files:
+            Filelist.append(os.path.join(home, filename))
+    return Filelist
+
+
+def split_by_capital(name):
+    # BoxingPunchingBag -> Boxing Punching Bag
+    new_name = ""
+    for i in range(len(name)):
+        if name[i].isupper() and i != 0:
+            new_name += " "
+        new_name += name[i]
+    return new_name
+
+
+def process_imagenet(root, split):
+    root = os.path.expanduser(root)
+    data = ImageNet(root, split=split)
+    samples = [(path, data.classes[label][0]) for path, label in data.samples]
+    output = f"imagenet_{split}.csv"
+
+    with open(output, "w") as f:
+        writer = csv.writer(f)
+        writer.writerows(samples)
+
+    print(f"Saved {len(samples)} samples to {output}.")
+
+
+def process_ucf101(root, split):
+    root = os.path.expanduser(root)
+    video_lists = get_filelist(os.path.join(root, split))
+    classes = [x.split("/")[-2] for x in video_lists]
+    classes = [split_by_capital(x) for x in classes]
+    samples = list(zip(video_lists, classes))
+    output = f"ucf101_{split}.csv"
+
+    with open(output, "w") as f:
+        writer = csv.writer(f)
+        writer.writerows(samples)
+
+    print(f"Saved {len(samples)} samples to {output}.")
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("dataset", type=str, choices=["imagenet", "ucf101"])
+    parser.add_argument("root", type=str)
+    parser.add_argument("--split", type=str, default="train")
+    args = parser.parse_args()
+
+    if args.dataset == "imagenet":
+        process_imagenet(args.root, args.split)
+    elif args.dataset == "ucf101":
+        process_ucf101(args.root, args.split)
+    else:
+        raise ValueError("Invalid dataset")
diff --git a/tools/datasets/csvutil.py b/tools/datasets/csvutil.py
new file mode 100644
index 0000000000000000000000000000000000000000..4bbd22db24962ce2c66656445a043c35fbeed38b
--- /dev/null
+++ b/tools/datasets/csvutil.py
@@ -0,0 +1,96 @@ +import argparse +import csv +import os + +from tqdm import tqdm + +# path, name, #frames +PREFIX = [ + "The video shows", + "The video captures", + "The video features", + "The video depicts", + "The video presents", + "The video features", + "The video is ", + "In the video,", +] + + +def get_video_length(path): + import cv2 + + cap = cv2.VideoCapture(path) + return int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + +def main(args): + input_path = args.input + output_path = args.output + if output_path is None: + name = os.path.basename(input_path) + name, ext = os.path.splitext(name) + if args.fmin is not None: + name += f"_fmin_{args.fmin}" + if args.fmax is not None: + name += f"_fmax_{args.fmax}" + if args.remove_empty_caption: + name += "_rec" + if args.remove_caption_prefix: + name += "_rcp" + if args.root is not None: + name += f"_root" + if args.relength: + name += "_relength" + output_path = os.path.join(os.path.dirname(input_path), name + ext) + + with open(input_path, "r") as f: + reader = csv.reader(f) + data = list(reader) + print("Number of videos before filtering:", len(data)) + + data_new = [] + for i, row in tqdm(enumerate(data)): + path = row[0] + caption = row[1] + n_frames = int(row[2]) + if args.fmin is not None and n_frames < args.fmin: + continue + if args.fmax is not None and n_frames > args.fmax: + continue + if args.remove_empty_caption and len(caption) == 0: + continue + if args.remove_caption_prefix: + for prefix in PREFIX: + if caption.startswith(prefix): + caption = caption[len(prefix) :].strip() + if caption[0].islower(): + caption = caption[0].upper() + caption[1:] + row[1] = caption + break + if args.root is not None: + row[0] = os.path.join(args.root, path) + if args.relength: + n_frames = get_video_length(row[0]) + row[2] = n_frames + data_new.append(row) + + print("Number of videos after filtering:", len(data_new)) + with open(output_path, "w") as f: + writer = csv.writer(f) + writer.writerows(data_new) + print("Output saved to", output_path) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("input", type=str) + parser.add_argument("--output", type=str, default=None) + parser.add_argument("--fmin", type=int, default=None) + parser.add_argument("--fmax", type=int, default=None) + parser.add_argument("--root", type=str, default=None) + parser.add_argument("--remove-empty-caption", action="store_true") + parser.add_argument("--remove-caption-prefix", action="store_true") + parser.add_argument("--relength", action="store_true") + args = parser.parse_args() + main(args) diff --git a/tools/intepolate/README.md b/tools/intepolate/README.md new file mode 100644 index 0000000000000000000000000000000000000000..cd406e140267824d8f30405e09e7dfcb591eb207 --- /dev/null +++ b/tools/intepolate/README.md @@ -0,0 +1 @@ +# To be added diff --git a/tools/scenedetect/README.md b/tools/scenedetect/README.md new file mode 100644 index 0000000000000000000000000000000000000000..8052733739109237ea620a1983ba31b7473fee92 --- /dev/null +++ b/tools/scenedetect/README.md @@ -0,0 +1,9 @@ +# Scene Detection and Video Split + +Raw videos from the Internet may be too long for training. +Thus, we detect scenes in raw videos and split them into short clips based on the scenes. +First prepare the video processing packages. +```bash +pip install scenedetect moviepy opencv-python +``` +Then run `scene_detect.py`. We provide efficient processing using `multiprocessing`. Don't forget to specify your own dataset path. 
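+
+For example, after setting `root_src` and `root_dst` in `scene_detect.py` (and optionally switching the `__main__` block from `scene_detect()` to `scene_detect_mp()` for multiprocessing), a typical invocation from the repository root might look like this:
+
+```bash
+python -m tools.scenedetect.scene_detect
+```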
diff --git a/tools/scenedetect/scene_detect.py b/tools/scenedetect/scene_detect.py new file mode 100644 index 0000000000000000000000000000000000000000..c46e59d5abce24575d62a3e3bdffb2aed49efa0b --- /dev/null +++ b/tools/scenedetect/scene_detect.py @@ -0,0 +1,138 @@ +import os +from multiprocessing import Pool + +from mmengine.logging import MMLogger +from scenedetect import ContentDetector, detect +from tqdm import tqdm + +from opensora.utils.misc import get_timestamp + +from .utils import check_mp4_integrity, clone_folder_structure, iterate_files, split_video + +# config +target_fps = 30 # int +shorter_size = 512 # int +min_seconds = 1 # float +max_seconds = 5 # float +assert max_seconds > min_seconds +cfg = dict( + target_fps=target_fps, + min_seconds=min_seconds, + max_seconds=max_seconds, + shorter_size=shorter_size, +) + + +def process_folder(root_src, root_dst): + # create logger + folder_path_log = os.path.dirname(root_dst) + log_name = os.path.basename(root_dst) + timestamp = get_timestamp() + log_path = os.path.join(folder_path_log, f"{log_name}_{timestamp}.log") + logger = MMLogger.get_instance(log_name, log_file=log_path) + + # clone folder structure + clone_folder_structure(root_src, root_dst) + + # all source videos + mp4_list = [x for x in iterate_files(root_src) if x.endswith(".mp4")] + mp4_list = sorted(mp4_list) + + for idx, sample_path in tqdm(enumerate(mp4_list)): + folder_src = os.path.dirname(sample_path) + folder_dst = os.path.join(root_dst, os.path.relpath(folder_src, root_src)) + + # check src video integrity + if not check_mp4_integrity(sample_path, logger=logger): + continue + + # detect scenes + scene_list = detect(sample_path, ContentDetector(), start_in_scene=True) + + # split scenes + save_path_list = split_video(sample_path, scene_list, save_dir=folder_dst, **cfg, logger=logger) + + # check integrity of generated clips + for x in save_path_list: + check_mp4_integrity(x, logger=logger) + + +def scene_detect(): + """detect & cut scenes using a single process + Expected dataset structure: + data/ + your_dataset/ + raw_videos/ + xxx.mp4 + yyy.mp4 + + This function results in: + data/ + your_dataset/ + raw_videos/ + xxx.mp4 + yyy.mp4 + zzz.mp4 + clips/ + xxx_scene-0.mp4 + yyy_scene-0.mp4 + yyy_scene-1.mp4 + """ + # TODO: specify your dataset root + root_src = f"./data/your_dataset/raw_videos" + root_dst = f"./data/your_dataset/clips" + + process_folder(root_src, root_dst) + + +def scene_detect_mp(): + """detect & cut scenes using multiple processes + Expected dataset structure: + data/ + your_dataset/ + raw_videos/ + split_0/ + xxx.mp4 + yyy.mp4 + split_1/ + xxx.mp4 + yyy.mp4 + + This function results in: + data/ + your_dataset/ + raw_videos/ + split_0/ + xxx.mp4 + yyy.mp4 + split_1/ + xxx.mp4 + yyy.mp4 + clips/ + split_0/ + xxx_scene-0.mp4 + yyy_scene-0.mp4 + split_1/ + xxx_scene-0.mp4 + yyy_scene-0.mp4 + yyy_scene-1.mp4 + """ + # TODO: specify your dataset root + root_src = f"./data/your_dataset/raw_videos" + root_dst = f"./data/your_dataset/clips" + + # TODO: specify your splits + splits = ["split_0", "split_1"] + + # process folders + root_src_list = [os.path.join(root_src, x) for x in splits] + root_dst_list = [os.path.join(root_dst, x) for x in splits] + + with Pool(processes=len(splits)) as pool: + pool.starmap(process_folder, list(zip(root_src_list, root_dst_list))) + + +if __name__ == "__main__": + # TODO: choose single process or multiprocessing + scene_detect() + # scene_detect_mp() diff --git a/tools/scenedetect/utils.py b/tools/scenedetect/utils.py new 
file mode 100644
index 0000000000000000000000000000000000000000..19eae31463bc1464b887877856bcf5c49ccde923
--- /dev/null
+++ b/tools/scenedetect/utils.py
@@ -0,0 +1,145 @@
+import os
+import subprocess
+
+import cv2
+from imageio_ffmpeg import get_ffmpeg_exe
+from mmengine.logging import print_log
+from moviepy.editor import VideoFileClip
+from scenedetect import FrameTimecode
+
+
+def iterate_files(folder_path):
+    # os.walk already traverses the tree recursively, so yielding the files of
+    # each visited directory covers every file under folder_path
+    for root, dirs, files in os.walk(folder_path):
+        for file in files:
+            file_path = os.path.join(root, file)
+            yield file_path
+
+
+def iterate_folders(folder_path):
+    # likewise, os.walk visits every subdirectory under folder_path
+    for root, dirs, files in os.walk(folder_path):
+        for subdir in dirs:
+            subdir_path = os.path.join(root, subdir)
+            yield subdir_path
+
+
+def clone_folder_structure(root_src, root_dst, verbose=False):
+    src_path_list = iterate_folders(root_src)
+    src_relpath_list = [os.path.relpath(x, root_src) for x in src_path_list]
+
+    os.makedirs(root_dst, exist_ok=True)
+    dst_path_list = [os.path.join(root_dst, x) for x in src_relpath_list]
+    for folder_path in dst_path_list:
+        os.makedirs(folder_path, exist_ok=True)
+        if verbose:
+            print(f"Create folder: '{folder_path}'")
+
+
+def count_files(root, suffix=".mp4"):
+    files_list = iterate_files(root)
+    cnt = len([x for x in files_list if x.endswith(suffix)])
+    return cnt
+
+
+def check_mp4_integrity(file_path, verbose=True, logger=None):
+    try:
+        # opening the clip is enough to validate the container; close it to
+        # release the file handle
+        clip = VideoFileClip(file_path)
+        clip.close()
+        if verbose:
+            print_log(f"The MP4 file '{file_path}' is intact.", logger=logger)
+        return True
+    except Exception as e:
+        if verbose:
+            print_log(f"Error: {e}", logger=logger)
+            print_log(f"The MP4 file '{file_path}' is not intact.", logger=logger)
+        return False
+
+
+def count_frames(video_path):
+    cap = cv2.VideoCapture(video_path)
+
+    if not cap.isOpened():
+        print(f"Error: Could not open video file '{video_path}'")
+        return
+
+    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
+    print(f"Total frames in the video '{video_path}': {total_frames}")
+
+    cap.release()
+
+
+def split_video(
+    sample_path,
+    scene_list,
+    save_dir,
+    target_fps=30,
+    min_seconds=1,
+    max_seconds=10,
+    shorter_size=512,
+    verbose=False,
+    logger=None,
+):
+    FFMPEG_PATH = get_ffmpeg_exe()
+
+    save_path_list = []
+    for idx, scene in enumerate(scene_list):
+        s, t = scene  # FrameTimecode
+        fps = s.framerate
+        max_duration = FrameTimecode(timecode="00:00:00", fps=fps)
+        max_duration.frame_num = round(fps * max_seconds)
+        duration = min(max_duration, t - s)
+        if duration.get_frames() < round(min_seconds * fps):
+            continue
+
+        # save path
+        fname = os.path.basename(sample_path)
+        fname_wo_ext = os.path.splitext(fname)[0]
+        # TODO: fname pattern
+        save_path = os.path.join(save_dir, f"{fname_wo_ext}_scene-{idx}.mp4")
+
+        # ffmpeg cmd
+        cmd = [FFMPEG_PATH]
+
+        # ffmpeg's own log output is captured and printed only when verbose=True;
+        # uncomment the next line to limit ffmpeg logging to errors
+        # cmd += ['-v', 'error']
+
+        # input path
+        cmd += ["-i", sample_path]
+
+        # clip to cut
+        cmd += ["-nostdin", "-y", "-ss", str(s.get_seconds()), "-t", str(duration.get_seconds())]
+
+        # target fps
+        # cmd += ['-vf', 'select=mod(n\,2)']
+        cmd += ["-r", f"{target_fps}"]
+
+        # aspect ratio: scale the shorter side to `shorter_size`, keeping the aspect ratio
+        cmd += ["-vf", f"scale='if(gt(iw,ih),-2,{shorter_size})':'if(gt(iw,ih),{shorter_size},-2)'"]
+        # cmd += ['-vf', f"scale='if(gt(iw,ih),{shorter_size},trunc(ow/a/2)*2)':-2"]
+
+        cmd += ["-map", "0", save_path]
+
+        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+        stdout, _ = proc.communicate()
+        if verbose:
+            print_log(stdout.decode("utf-8"), logger=logger)
+
+        # record the path of the generated clip so callers can verify it
+        save_path_list.append(save_path)
+        print_log(f"Video clip saved to '{save_path}'", logger=logger)
+
+    return save_path_list
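+
+
+# A minimal usage sketch (hypothetical paths), mirroring how process_folder in
+# scene_detect.py drives these helpers:
+#
+#   from scenedetect import ContentDetector, detect
+#   scene_list = detect("data/raw/clip.mp4", ContentDetector(), start_in_scene=True)
+#   clips = split_video("data/raw/clip.mp4", scene_list, save_dir="data/clips",
+#                       target_fps=30, min_seconds=1, max_seconds=5, shorter_size=512)
+#   for clip_path in clips:
+#       check_mp4_integrity(clip_path)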