diff --git a/NOTICE.md b/NOTICE.md new file mode 100644 index 0000000000000000000000000000000000000000..e6dad38733a306f61263d083c471e6bac403e222 --- /dev/null +++ b/NOTICE.md @@ -0,0 +1,76 @@ +# NOTICE + +Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved. + +--- + +## Aviso de Propriedade Intelectual e Licenciamento + +### **Processo de Patenteamento em Andamento (EM PORTUGUÊS):** + +O método e o sistema de orquestração de prompts denominados **ADUC (Automated Discovery and Orchestration of Complex tasks)**, conforme descritos neste documento e implementados neste software, estão atualmente em processo de patenteamento. + +O titular dos direitos, Carlos Rodrigues dos Santos, está buscando proteção legal para as inovações chave da arquitetura ADUC, incluindo, mas não se limitando a: + +* Fragmentação e escalonamento de solicitações que excedem limites de contexto de modelos de IA. +* Distribuição inteligente de sub-tarefas para especialistas heterogêneos. +* Gerenciamento de estado persistido com avaliação iterativa e realimentação para o planejamento de próximas etapas. +* Planejamento e roteamento sensível a custo, latência e requisitos de qualidade. +* O uso de "tokens universais" para comunicação agnóstica a modelos. + +### **Reconhecimento e Implicações (EM PORTUGUÊS):** + +Ao acessar ou utilizar este software e a arquitetura ADUC aqui implementada, você reconhece: + +1. A natureza inovadora e a importância da arquitetura ADUC no campo da orquestração de prompts para IA. +2. Que a essência desta arquitetura, ou suas implementações derivadas, podem estar sujeitas a direitos de propriedade intelectual, incluindo patentes. +3. Que o uso comercial, a reprodução da lógica central da ADUC em sistemas independentes, ou a exploração direta da invenção sem o devido licenciamento podem infringir os direitos de patente pendente. + +--- + +### **Patent Pending (IN ENGLISH):** + +The method and system for prompt orchestration named **ADUC (Automated Discovery and Orchestration of Complex tasks)**, as described herein and implemented in this software, are currently in the process of being patented. + +The rights holder, Carlos Rodrigues dos Santos, is seeking legal protection for the key innovations of the ADUC architecture, including, but not limited to: + +* Fragmentation and scaling of requests exceeding AI model context limits. +* Intelligent distribution of sub-tasks to heterogeneous specialists. +* Persistent state management with iterative evaluation and feedback for planning subsequent steps. +* Cost, latency, and quality-aware planning and routing. +* The use of "universal tokens" for model-agnostic communication. + +### **Acknowledgement and Implications (IN ENGLISH):** + +By accessing or using this software and the ADUC architecture implemented herein, you acknowledge: + +1. The innovative nature and significance of the ADUC architecture in the field of AI prompt orchestration. +2. That the essence of this architecture, or its derivative implementations, may be subject to intellectual property rights, including patents. +3. That commercial use, reproduction of ADUC's core logic in independent systems, or direct exploitation of the invention without proper licensing may infringe upon pending patent rights. 
+ +--- + +## Licença AGPLv3 + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see <https://www.gnu.org/licenses/>. + +--- + +**Contato para Consultas:** + +Para mais informações sobre a arquitetura ADUC, o status do patenteamento, ou para discutir licenciamento para usos comerciais ou não conformes com a AGPLv3, por favor, entre em contato: + +Carlos Rodrigues dos Santos +carlex22@gmail.com +Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025 \ No newline at end of file diff --git a/README.md b/README.md index a62cf1735f8da6e7e0a9f49212655f2fce9de1ef..b3c33af7ab6f30aeefd2bdd1eada282168b62cf9 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,210 @@ --- -title: Aduc Sdr VIDEO -emoji: 📊 -colorFrom: gray -colorTo: pink +title: Euia-AducSdr +emoji: 🎥 +colorFrom: indigo +colorTo: purple sdk: gradio -sdk_version: 5.44.0 app_file: app.py -pinned: false +pinned: true license: agpl-3.0 +short_description: Uma implementação aberta e funcional da arquitetura ADUC-SDR --- -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +### 🇧🇷 Português + +Uma implementação aberta e funcional da arquitetura ADUC-SDR (Arquitetura de Unificação Compositiva - Escala Dinâmica e Resiliente), projetada para a geração de vídeo coerente de longa duração. Este projeto materializa os princípios de fragmentação, navegação geométrica e um mecanismo de "eco causal com memória de 4 bits" para garantir a continuidade física e narrativa em sequências de vídeo geradas por múltiplos modelos de IA. + +**Licença:** Este projeto é licenciado sob os termos da **GNU Affero General Public License v3.0**. Isto significa que se você usar este software (ou qualquer trabalho derivado) para fornecer um serviço através de uma rede, você é **obrigado a disponibilizar o código-fonte completo** da sua versão para os usuários desse serviço. + +- **Copyright (C) 4 de Agosto de 2025, Carlos Rodrigues dos Santos** +- Uma cópia completa da licença pode ser encontrada no arquivo [LICENSE](LICENSE). + +--- + +### 🇬🇧 English + +An open and functional implementation of the ADUC-SDR (Architecture for Compositive Unification - Dynamic and Resilient Scaling) architecture, designed for long-form coherent video generation. This project materializes the principles of fragmentation, geometric navigation, and a "causal echo with 4-bit memory" mechanism to ensure physical and narrative continuity in video sequences generated by multiple AI models. + +**License:** This project is licensed under the terms of the **GNU Affero General Public License v3.0**. This means that if you use this software (or any derivative work) to provide a service over a network, you are **required to make the complete source code** of your version available to the users of that service. + +- **Copyright (C) August 4, 2025, Carlos Rodrigues dos Santos** +- A full copy of the license can be found in the [LICENSE](LICENSE) file. 
+ +--- + +## **Aviso de Propriedade Intelectual e Patenteamento** + +### **Processo de Patenteamento em Andamento (EM PORTUGUÊS):** + +A arquitetura e o método **ADUC (Automated Discovery and Orchestration of Complex tasks)**, conforme descritos neste projeto e nas reivindicações associadas, estão **atualmente em processo de patenteamento**. + +O titular dos direitos, Carlos Rodrigues dos Santos, está buscando proteção legal para as inovações chave da arquitetura ADUC, que incluem, mas não se limitam a: + +* Fragmentação e escalonamento de solicitações que excedem limites de contexto de modelos de IA. +* Distribuição inteligente de sub-tarefas para especialistas heterogêneos. +* Gerenciamento de estado persistido com avaliação iterativa e realimentação para o planejamento de próximas etapas. +* Planejamento e roteamento sensível a custo, latência e requisitos de qualidade. +* O uso de "tokens universais" para comunicação agnóstica a modelos. + +Ao utilizar este software e a arquitetura ADUC aqui implementada, você reconhece a natureza inovadora desta arquitetura e que a **reprodução ou exploração da lógica central da ADUC em sistemas independentes pode infringir direitos de patente pendente.** + +--- + +### **Patent Pending (IN ENGLISH):** + +The **ADUC (Automated Discovery and Orchestration of Complex tasks)** architecture and method, as described in this project and its associated claims, are **currently in the process of being patented.** + +The rights holder, Carlos Rodrigues dos Santos, is seeking legal protection for the key innovations of the ADUC architecture, including, but not limited to: + +* Fragmentation and scaling of requests exceeding AI model context limits. +* Intelligent distribution of sub-tasks to heterogeneous specialists. +* Persistent state management with iterative evaluation and feedback for planning subsequent steps. +* Cost, latency, and quality-aware planning and routing. +* The use of "universal tokens" for model-agnostic communication. + +By using this software and the ADUC architecture implemented herein, you acknowledge the innovative nature of this architecture and that **the reproduction or exploitation of ADUC's core logic in independent systems may infringe upon pending patent rights.** + +--- + +### Detalhes Técnicos e Reivindicações da ADUC + +#### 🇧🇷 Definição Curta (para Tese e Patente) + +**ADUC** é um *framework pré-input* e *intermediário* de **gerenciamento de prompts** que: + +1. **fragmenta** solicitações acima do limite de contexto de qualquer modelo, +2. **escala linearmente** (processo sequencial com memória persistida), +3. **distribui** sub-tarefas a **especialistas** (modelos/ferramentas heterogêneos), e +4. **realimenta** a próxima etapa com avaliação do que foi feito/esperado (LLM diretor). + +Não é um modelo; é uma **camada orquestradora** plugável antes do input de modelos existentes (texto, imagem, áudio, vídeo), usando *tokens universais* e a tecnologia atual. + +#### 🇬🇧 Short Definition (for Thesis and Patent) + +**ADUC** is a *pre-input* and *intermediate* **prompt management framework** that: + +1. **fragments** requests exceeding any model's context limit, +2. **scales linearly** (sequential process with persisted memory), +3. **distributes** sub-tasks to **specialists** (heterogeneous models/tools), and +4. **feeds back** to the next step with an evaluation of what was done/expected (director LLM). 
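+The four steps above can be sketched as a minimal, model-agnostic loop (an illustrative, hedged sketch only; `director` and `specialists` stand for whatever planner LLM and executor models an integrator wires in — these names are not part of this repository's API):
+
+```python
+def orchestrate(request, director, specialists, token_limit):
+    state = []                                              # persisted shared memory (results/latents/"echo")
+    tasks = director.fragment(request, token_limit)         # 1. split the request into sub-tasks <= token limit
+    while tasks:
+        task = tasks.pop(0)                                 # 2. sequential processing, one fragment at a time
+        name = director.route(task)                         # 3. pick a specialist by declared capability
+        prompt = director.compose_prompt(task, state)       #    universal-token prompt + references to persisted state
+        output = specialists[name].run(prompt)
+        state.append(output)                                #    persist the output for the next fragment
+        tasks = director.evaluate(task, output, state, tasks)  # 4. feedback: re-plan until completion criteria are met
+    return state
+```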
+ +It is not a model; it is a pluggable **orchestration layer** before the input of existing models (text, image, audio, video), using *universal tokens* and current technology. + +--- + +#### 🇧🇷 Elementos Essenciais (Telegráfico) + +* **Agnóstico a modelos:** opera com qualquer LLM/difusor/API. +* **Pré-input manager:** recebe pedido do usuário, **divide** em blocos ≤ limite de tokens, **prioriza**, **agenda** e **roteia**. +* **Memória persistida:** resultados/latentes/“eco” viram **estado compartilhado** para o próximo bloco (nada é ignorado). +* **Especialistas:** *routers* decidem quem faz o quê (ex.: “descrição → LLM-A”, “keyframe → Img-B”, “vídeo → Vid-C”). +* **Controle de qualidade:** LLM diretor compara *o que fez* × *o que deveria* × *o que falta* e **regenera objetivos** do próximo fragmento. +* **Custo/latência-aware:** planeja pela **VRAM/tempo/custo**, não tenta “abraçar tudo de uma vez”. + +#### 🇬🇧 Essential Elements (Telegraphic) + +* **Model-agnostic:** operates with any LLM/diffuser/API. +* **Pre-input manager:** receives user request, **divides** into blocks ≤ token limit, **prioritizes**, **schedules**, and **routes**. +* **Persisted memory:** results/latents/“echo” become **shared state** for the next block (nothing is ignored). +* **Specialists:** *routers* decide who does what (e.g., “description → LLM-A”, “keyframe → Img-B”, “video → Vid-C”). +* **Quality control:** director LLM compares *what was done* × *what should be done* × *what is missing* and **regenerates objectives** for the next fragment. +* **Cost/latency-aware:** plans by **VRAM/time/cost**, does not try to “embrace everything at once”. + +--- + +#### 🇧🇷 Reivindicações Independentes (Método e Sistema) + +**Reivindicação Independente (Método) — Versão Enxuta:** + +1. **Método** de **orquestração de prompts** para execução de tarefas acima do limite de contexto de modelos de IA, compreendendo: + (a) **receber** uma solicitação que excede um limite de tokens; + (b) **analisar** a solicitação por um **LLM diretor** e **fragmentá-la** em sub-tarefas ≤ limite; + (c) **selecionar** especialistas de execução para cada sub-tarefa com base em capacidades declaradas; + (d) **gerar** prompts específicos por sub-tarefa em **tokens universais**, incluindo referências ao **estado persistido** de execuções anteriores; + (e) **executar sequencialmente** as sub-tarefas e **persistir** suas saídas como memória (incluindo latentes/eco/artefatos); + (f) **avaliar** automaticamente a saída versus metas declaradas e **regenerar objetivos** do próximo fragmento; + (g) **iterar** (b)–(f) até que os critérios de completude sejam atendidos, produzindo o resultado agregado; + em que o framework **escala linearmente** no tempo e armazenamento físico, **independente** da janela de contexto dos modelos subjacentes. + +**Reivindicação Independente (Sistema):** + +2. **Sistema** de orquestração de prompts, compreendendo: um **planejador LLM diretor**; um **roteador de especialistas**; um **banco de estado persistido** (incl. memória cinética para vídeo); um **gerador de prompts universais**; e um **módulo de avaliação/realimentação**, acoplados por uma **API pré-input** a modelos heterogêneos. + +#### 🇬🇧 Independent Claims (Method and System) + +**Independent Claim (Method) — Concise Version:** + +1. 
A **method** for **prompt orchestration** for executing tasks exceeding AI model context limits, comprising: + (a) **receiving** a request that exceeds a token limit; + (b) **analyzing** the request by a **director LLM** and **fragmenting it** into sub-tasks ≤ the limit; + (c) **selecting** execution specialists for each sub-task based on declared capabilities; + (d) **generating** specific prompts per sub-task in **universal tokens**, including references to the **persisted state** of previous executions; + (e) **sequentially executing** the sub-tasks and **persisting** their outputs as memory (including latents/echo/artifacts); + (f) **automatically evaluating** the output against declared goals and **regenerating objectives** for the next fragment; + (g) **iterating** (b)–(f) until completion criteria are met, producing the aggregated result; + wherein the framework **scales linearly** in time and physical storage, **independent** of the context window of the underlying models. + +**Independent Claim (System):** + +2. A prompt orchestration **system**, comprising: a **director LLM planner**; a **specialist router**; a **persisted state bank** (incl. kinetic memory for video); a **universal prompt generator**; and an **evaluation/feedback module**, coupled via a **pre-input API** to heterogeneous models. + +--- + +#### 🇧🇷 Dependentes Úteis + +* (3) Onde o roteamento considera **custo/latência/VRAM** e metas de qualidade. +* (4) Onde o banco de estado inclui **eco cinético** para vídeo (últimos *n* frames/latentes/fluxo). +* (5) Onde a avaliação usa métricas específicas por domínio (Lflow, consistência semântica, etc.). +* (6) Onde *tokens universais* padronizam instruções entre especialistas. +* (7) Onde a orquestração decide **cut vs continuous** e **corte regenerativo** (Déjà-Vu) ao editar vídeo. +* (8) Onde o sistema **nunca descarta** conteúdo excedente: **reagenda** em novos fragmentos. + +#### 🇬🇧 Useful Dependents + +* (3) Wherein routing considers **cost/latency/VRAM** and quality goals. +* (4) Wherein the state bank includes **kinetic echo** for video (last *n* frames/latents/flow). +* (5) Wherein evaluation uses domain-specific metrics (Lflow, semantic consistency, etc.). +* (6) Wherein *universal tokens* standardize instructions between specialists. +* (7) Wherein orchestration decides **cut vs continuous** and **regenerative cut** (Déjà-Vu) when editing video. +* (8) Wherein the system **never discards** excess content: it **reschedules** it in new fragments. + +--- + +#### 🇧🇷 Como isso conversa com SDR (Vídeo) + +* **Eco Cinético**: é um **tipo de estado persistido** consumido pelo próximo passo. +* **Déjà-Vu (Corte Regenerativo)**: é **uma política de orquestração** aplicada quando há edição; ADUC decide, monta os prompts certos e chama o especialista de vídeo. +* **Cut vs Continuous**: decisão do **diretor** com base em estado + metas; ADUC roteia e garante a sobreposição/remoção final. + +#### 🇬🇧 How this Converses with SDR (Video) + +* **Kinetic Echo**: is a **type of persisted state** consumed by the next step. +* **Déjà-Vu (Regenerative Cut)**: is an **orchestration policy** applied during editing; ADUC decides, crafts the right prompts, and calls the video specialist. +* **Cut vs Continuous**: decision made by the **director** based on state + goals; ADUC routes and ensures the final overlap/removal. + +--- + +#### 🇧🇷 Mensagem Clara ao Usuário (Experiência) + +> “Seu pedido excede o limite X do modelo Y. 
Em vez de truncar silenciosamente, o **ADUC** dividirá e **entregará 100%** do conteúdo por etapas coordenadas.” + +Isso é diferencial prático e jurídico: **não-obviedade** por transformar limite de contexto em **pipeline controlado**, com **persistência de estado** e **avaliação iterativa**. + +#### 🇬🇧 Clear User Message (Experience) + +> "Your request exceeds model Y's limit X. Instead of silently truncating, **ADUC** will divide and **deliver 100%** of the content through coordinated steps." + +This is a practical and legal differentiator: **non-obviousness** by transforming context limits into a **controlled pipeline**, with **state persistence** and **iterative evaluation**. + +--- + +### Contact / Contato / Contacto + +- **Author / Autor:** Carlos Rodrigues dos Santos +- **Email:** carlex22@gmail.com +- **GitHub:** [https://github.com/carlex22/Aduc-sdr](https://github.com/carlex22/Aduc-sdr) +- **Hugging Face Spaces:** + - [Ltx-SuperTime-60Secondos](https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/) + - [Novinho](https://huggingface.co/spaces/Carlexxx/Novinho/) + +--- \ No newline at end of file diff --git a/configs/ltxv-13b-0.9.7-dev.yaml b/configs/ltxv-13b-0.9.7-dev.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ae548253526c1de5804bb430407850573305cd14 --- /dev/null +++ b/configs/ltxv-13b-0.9.7-dev.yaml @@ -0,0 +1,34 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.7-dev.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + guidance_scale: [1, 1, 6, 8, 6, 1, 1] + stg_scale: [0, 0, 4, 4, 4, 2, 1] + rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1] + guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180] + skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]] + num_inference_steps: 30 + skip_final_inference_steps: 3 + cfg_star_rescale: true + +second_pass: + guidance_scale: [1] + stg_scale: [1] + rescaling_scale: [1] + guidance_timesteps: [1.0] + skip_block_list: [27] + num_inference_steps: 30 + skip_initial_inference_steps: 17 + cfg_star_rescale: true \ No newline at end of file diff --git a/configs/ltxv-13b-0.9.7-distilled.yaml b/configs/ltxv-13b-0.9.7-distilled.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9df17bb001b39d6d12c7013cb823c44b85d28aea --- /dev/null +++ b/configs/ltxv-13b-0.9.7-distilled.yaml @@ -0,0 +1,28 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.7-distilled.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.7.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", 
"from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + +second_pass: + timesteps: [0.9094, 0.7250, 0.4219] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] diff --git a/configs/ltxv-13b-0.9.8-dev-fp8.yaml b/configs/ltxv-13b-0.9.8-dev-fp8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..76b25f1373061a873a3134d471b927b66c37aa54 --- /dev/null +++ b/configs/ltxv-13b-0.9.8-dev-fp8.yaml @@ -0,0 +1,34 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.8-dev-fp8.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + guidance_scale: [1, 1, 6, 8, 6, 1, 1] + stg_scale: [0, 0, 4, 4, 4, 2, 1] + rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1] + guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180] + skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]] + num_inference_steps: 30 + skip_final_inference_steps: 3 + cfg_star_rescale: true + +second_pass: + guidance_scale: [1] + stg_scale: [1] + rescaling_scale: [1] + guidance_timesteps: [1.0] + skip_block_list: [27] + num_inference_steps: 30 + skip_initial_inference_steps: 17 + cfg_star_rescale: true diff --git a/configs/ltxv-13b-0.9.8-dev.yaml b/configs/ltxv-13b-0.9.8-dev.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0c22e9e5b3704146d521e7c60a841c043373c66e --- /dev/null +++ b/configs/ltxv-13b-0.9.8-dev.yaml @@ -0,0 +1,34 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.8-dev.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + guidance_scale: [1, 1, 6, 8, 6, 1, 1] + stg_scale: [0, 0, 4, 4, 4, 2, 1] + rescaling_scale: [1, 1, 0.5, 0.5, 1, 1, 1] + guidance_timesteps: [1.0, 0.996, 0.9933, 0.9850, 0.9767, 0.9008, 0.6180] + skip_block_list: [[], [11, 25, 35, 39], [22, 35, 39], [28], [28], [28], [28]] + 
num_inference_steps: 30 + skip_final_inference_steps: 3 + cfg_star_rescale: true + +second_pass: + guidance_scale: [1] + stg_scale: [1] + rescaling_scale: [1] + guidance_timesteps: [1.0] + skip_block_list: [27] + num_inference_steps: 30 + skip_initial_inference_steps: 17 + cfg_star_rescale: true \ No newline at end of file diff --git a/configs/ltxv-13b-0.9.8-distilled-fp8.yaml b/configs/ltxv-13b-0.9.8-distilled-fp8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..444718bacbaa698c6b3df9cff6c89c9a2f95923c --- /dev/null +++ b/configs/ltxv-13b-0.9.8-distilled-fp8.yaml @@ -0,0 +1,29 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.8-distilled-fp8.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + +second_pass: + timesteps: [0.9094, 0.7250, 0.4219] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + tone_map_compression_ratio: 0.6 diff --git a/configs/ltxv-13b-0.9.8-distilled.yaml b/configs/ltxv-13b-0.9.8-distilled.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a1ac7239f3c3ecf0a8e4e03c3a1415a8b257dbf0 --- /dev/null +++ b/configs/ltxv-13b-0.9.8-distilled.yaml @@ -0,0 +1,29 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-13b-0.9.8-distilled.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + +second_pass: + timesteps: [0.9094, 0.7250, 0.4219] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + tone_map_compression_ratio: 0.6 diff --git a/configs/ltxv-2b-0.9.1.yaml b/configs/ltxv-2b-0.9.1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6e888de3fb5ff258cd4caf52453eb707a3941761 --- /dev/null +++ b/configs/ltxv-2b-0.9.1.yaml @@ -0,0 +1,17 @@ +pipeline_type: base +checkpoint_path: "ltx-video-2b-v0.9.1.safetensors" +guidance_scale: 3 +stg_scale: 1 +rescaling_scale: 0.7 +skip_block_list: [19] 
+num_inference_steps: 40 +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false \ No newline at end of file diff --git a/configs/ltxv-2b-0.9.5.yaml b/configs/ltxv-2b-0.9.5.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5998c6040bdbc3b4b0f6838bb7b61b58d0b58b5d --- /dev/null +++ b/configs/ltxv-2b-0.9.5.yaml @@ -0,0 +1,17 @@ +pipeline_type: base +checkpoint_path: "ltx-video-2b-v0.9.5.safetensors" +guidance_scale: 3 +stg_scale: 1 +rescaling_scale: 0.7 +skip_block_list: [19] +num_inference_steps: 40 +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false \ No newline at end of file diff --git a/configs/ltxv-2b-0.9.6-dev.yaml b/configs/ltxv-2b-0.9.6-dev.yaml new file mode 100644 index 0000000000000000000000000000000000000000..487f99708e0672dd17b5bd78424f25261163f7dc --- /dev/null +++ b/configs/ltxv-2b-0.9.6-dev.yaml @@ -0,0 +1,17 @@ +pipeline_type: base +checkpoint_path: "ltxv-2b-0.9.6-dev-04-25.safetensors" +guidance_scale: 3 +stg_scale: 1 +rescaling_scale: 0.7 +skip_block_list: [19] +num_inference_steps: 40 +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false \ No newline at end of file diff --git a/configs/ltxv-2b-0.9.6-distilled.yaml b/configs/ltxv-2b-0.9.6-distilled.yaml new file mode 100644 index 0000000000000000000000000000000000000000..328d9291613f16ba191cb56f97340f3bfa4d341d --- /dev/null +++ b/configs/ltxv-2b-0.9.6-distilled.yaml @@ -0,0 +1,16 @@ +pipeline_type: base +checkpoint_path: "ltxv-2b-0.9.6-distilled-04-25.safetensors" +guidance_scale: 1 +stg_scale: 0 +rescaling_scale: 1 +num_inference_steps: 8 +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 
+prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: true \ No newline at end of file diff --git a/configs/ltxv-2b-0.9.8-distilled-fp8.yaml b/configs/ltxv-2b-0.9.8-distilled-fp8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c02b2057cb2050ea8f277697a3d741ce1ed03403 --- /dev/null +++ b/configs/ltxv-2b-0.9.8-distilled-fp8.yaml @@ -0,0 +1,28 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-2b-0.9.8-distilled-fp8.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "float8_e4m3fn" # options: "float8_e4m3fn", "bfloat16", "mixed_precision" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + +second_pass: + timesteps: [0.9094, 0.7250, 0.4219] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] diff --git a/configs/ltxv-2b-0.9.8-distilled.yaml b/configs/ltxv-2b-0.9.8-distilled.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e24b0eb46b7113e2fe52b3d86d8f0eb4adae8de --- /dev/null +++ b/configs/ltxv-2b-0.9.8-distilled.yaml @@ -0,0 +1,28 @@ +pipeline_type: multi-scale +checkpoint_path: "ltxv-2b-0.9.8-distilled.safetensors" +downscale_factor: 0.6666666 +spatial_upscaler_model_path: "ltxv-spatial-upscaler-0.9.8.safetensors" +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 +text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false + +first_pass: + timesteps: [1.0000, 0.9937, 0.9875, 0.9812, 0.9750, 0.9094, 0.7250] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] + +second_pass: + timesteps: [0.9094, 0.7250, 0.4219] + guidance_scale: 1 + stg_scale: 0 + rescaling_scale: 1 + skip_block_list: [42] diff --git a/configs/ltxv-2b-0.9.yaml b/configs/ltxv-2b-0.9.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f501ca62c24085192cebe10c87261fba38c930bc --- /dev/null +++ b/configs/ltxv-2b-0.9.yaml @@ -0,0 +1,17 @@ +pipeline_type: base +checkpoint_path: "ltx-video-2b-v0.9.safetensors" +guidance_scale: 3 +stg_scale: 1 +rescaling_scale: 0.7 +skip_block_list: [19] +num_inference_steps: 40 +stg_mode: "attention_values" # options: "attention_values", "attention_skip", "residual", "transformer_block" +decode_timestep: 0.05 +decode_noise_scale: 0.025 
+text_encoder_model_name_or_path: "PixArt-alpha/PixArt-XL-2-1024-MS" +precision: "bfloat16" +sampler: "from_checkpoint" # options: "uniform", "linear-quadratic", "from_checkpoint" +prompt_enhancement_words_threshold: 120 +prompt_enhancer_image_caption_model_name_or_path: "MiaoshouAI/Florence-2-large-PromptGen-v2.0" +prompt_enhancer_llm_model_name_or_path: "unsloth/Llama-3.2-3B-Instruct" +stochastic_sampling: false \ No newline at end of file diff --git a/deformes4D_engine.py b/deformes4D_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..bdbc68fb5dda1ea03855267a47cd43f5b20b2c77 --- /dev/null +++ b/deformes4D_engine.py @@ -0,0 +1,292 @@ +# deformes4D_engine.py +# Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos +# +# MODIFICATIONS FOR ADUC-SDR: +# Copyright (C) 2025 Carlos Rodrigues dos Santos. All rights reserved. +# +# This file is part of the ADUC-SDR project. It contains the core logic for +# video fragment generation, latent manipulation, and dynamic editing, +# governed by the ADUC orchestrator. +# This component is licensed under the GNU Affero General Public License v3.0. +# +# AVISO DE PATENTE PENDENTE: O método e sistema ADUC implementado neste +# software está em processo de patenteamento. Consulte NOTICE.md. + +import os +import time +import imageio +import numpy as np +import torch +import logging +from PIL import Image, ImageOps +from dataclasses import dataclass +import gradio as gr +import subprocess +import random +import gc + +from audio_specialist import audio_specialist_singleton +from ltx_manager_helpers import ltx_manager_singleton +from flux_kontext_helpers import flux_kontext_singleton +from gemini_helpers import gemini_singleton +from ltx_video.models.autoencoders.vae_encode import vae_encode, vae_decode + +logger = logging.getLogger(__name__) + +@dataclass +class LatentConditioningItem: + latent_tensor: torch.Tensor + media_frame_number: int + conditioning_strength: float + +class Deformes4DEngine: + def __init__(self, ltx_manager, workspace_dir="deformes_workspace"): + self.ltx_manager = ltx_manager + self.workspace_dir = workspace_dir + self._vae = None + self.device = 'cuda' if torch.cuda.is_available() else 'cpu' + logger.info("Especialista Deformes4D (SDR Executor) inicializado.") + + @property + def vae(self): + if self._vae is None: + self._vae = self.ltx_manager.workers[0].pipeline.vae + self._vae.to(self.device); self._vae.eval() + return self._vae + + def save_latent_tensor(self, tensor: torch.Tensor, path: str): + torch.save(tensor.cpu(), path) + logger.info(f"Tensor latente salvo em: {path}") + + def load_latent_tensor(self, path: str) -> torch.Tensor: + tensor = torch.load(path, map_location=self.device) + logger.info(f"Tensor latente carregado de: {path} para o dispositivo {self.device}") + return tensor + + @torch.no_grad() + def pixels_to_latents(self, tensor: torch.Tensor) -> torch.Tensor: + tensor = tensor.to(self.device, dtype=self.vae.dtype) + return vae_encode(tensor, self.vae, vae_per_channel_normalize=True) + + @torch.no_grad() + def latents_to_pixels(self, latent_tensor: torch.Tensor, decode_timestep: float = 0.05) -> torch.Tensor: + latent_tensor = latent_tensor.to(self.device, dtype=self.vae.dtype) + timestep_tensor = torch.tensor([decode_timestep] * latent_tensor.shape[0], device=self.device, dtype=latent_tensor.dtype) + return vae_decode(latent_tensor, self.vae, is_video=True, timestep=timestep_tensor, vae_per_channel_normalize=True) + + def save_video_from_tensor(self, video_tensor: torch.Tensor, 
path: str, fps: int = 24): + if video_tensor is None or video_tensor.ndim != 5 or video_tensor.shape[2] == 0: return + video_tensor = video_tensor.squeeze(0).permute(1, 2, 3, 0) + video_tensor = (video_tensor.clamp(-1, 1) + 1) / 2.0 + video_np = (video_tensor.detach().cpu().float().numpy() * 255).astype(np.uint8) + with imageio.get_writer(path, fps=fps, codec='libx264', quality=8) as writer: + for frame in video_np: writer.append_data(frame) + logger.info(f"Vídeo salvo em: {path}") + + def _preprocess_image_for_latent_conversion(self, image: Image.Image, target_resolution: tuple) -> Image.Image: + if image.size != target_resolution: + logger.info(f" - AÇÃO: Redimensionando imagem de {image.size} para {target_resolution} antes da conversão para latente.") + return ImageOps.fit(image, target_resolution, Image.Resampling.LANCZOS) + return image + + def pil_to_latent(self, pil_image: Image.Image) -> torch.Tensor: + image_np = np.array(pil_image).astype(np.float32) / 255.0 + tensor = torch.from_numpy(image_np).permute(2, 0, 1).unsqueeze(0).unsqueeze(2) + tensor = (tensor * 2.0) - 1.0 + return self.pixels_to_latents(tensor) + + def _generate_video_and_audio_from_latents(self, latent_tensor, audio_prompt, base_name): + silent_video_path = os.path.join(self.workspace_dir, f"{base_name}_silent.mp4") + pixel_tensor = self.latents_to_pixels(latent_tensor) + self.save_video_from_tensor(pixel_tensor, silent_video_path, fps=24) + del pixel_tensor; gc.collect() + + try: + result = subprocess.run( + ["ffprobe", "-v", "error", "-show_entries", "format=duration", "-of", "default=noprint_wrappers=1:nokey=1", silent_video_path], + capture_output=True, text=True, check=True) + frag_duration = float(result.stdout.strip()) + except (subprocess.CalledProcessError, ValueError, FileNotFoundError): + logger.warning(f"ffprobe falhou em {os.path.basename(silent_video_path)}. Calculando duração manualmente.") + num_pixel_frames = latent_tensor.shape[2] * 8 + frag_duration = num_pixel_frames / 24.0 + + video_with_audio_path = audio_specialist_singleton.generate_audio_for_video( + video_path=silent_video_path, prompt=audio_prompt, + duration_seconds=frag_duration) + + if os.path.exists(silent_video_path): + os.remove(silent_video_path) + return video_with_audio_path + + def _generate_latent_tensor_internal(self, conditioning_items, ltx_params, target_resolution, total_frames_to_generate): + final_ltx_params = { + **ltx_params, + 'width': target_resolution[0], 'height': target_resolution[1], + 'video_total_frames': total_frames_to_generate, 'video_fps': 24, + 'current_fragment_index': int(time.time()), + 'conditioning_items_data': conditioning_items + } + new_full_latents, _ = self.ltx_manager.generate_latent_fragment(**final_ltx_params) + return new_full_latents + + def concatenate_videos_ffmpeg(self, video_paths: list[str], output_path: str) -> str: + if not video_paths: + raise gr.Error("Nenhum fragmento de vídeo para montar.") + list_file_path = os.path.join(self.workspace_dir, "concat_list.txt") + with open(list_file_path, 'w', encoding='utf-8') as f: + for path in video_paths: + f.write(f"file '{os.path.abspath(path)}'\n") + cmd_list = ['ffmpeg', '-y', '-f', 'concat', '-safe', '0', '-i', list_file_path, '-c', 'copy', output_path] + logger.info("Executando concatenação FFmpeg...") + try: + subprocess.run(cmd_list, check=True, capture_output=True, text=True) + except subprocess.CalledProcessError as e: + logger.error(f"Erro no FFmpeg: {e.stderr}") + raise gr.Error(f"Falha na montagem final do vídeo. 
Detalhes: {e.stderr}") + return output_path + + def generate_full_movie(self, + keyframes: list, + global_prompt: str, + storyboard: list, + seconds_per_fragment: float, + overlap_percent: int, + echo_frames: int, + handler_strength: float, + destination_convergence_strength: float, + base_ltx_params: dict, + video_resolution: int, + use_continuity_director: bool, + progress: gr.Progress = gr.Progress()): + + keyframe_paths = [item[0] if isinstance(item, tuple) else item for item in keyframes] + video_clips_paths, story_history, audio_history = [], "", "This is the beginning of the film." + target_resolution_tuple = (video_resolution, video_resolution) + n_trim_latents = 24 #self._quantize_to_multiple(int(seconds_per_fragment * 24 * (overlap_percent / 100.0)), 8) + echo_frames = 8 + + previous_latents_path = None + num_transitions_to_generate = len(keyframe_paths) - 1 + + for i in range(num_transitions_to_generate): + progress((i + 1) / num_transitions_to_generate, desc=f"Produzindo Transição {i+1}/{num_transitions_to_generate}") + + start_keyframe_path = keyframe_paths[i] + destination_keyframe_path = keyframe_paths[i+1] + present_scene_desc = storyboard[i] + + is_first_fragment = previous_latents_path is None + if is_first_fragment: + transition_type = "start" + motion_prompt = gemini_singleton.get_initial_motion_prompt( + global_prompt, start_keyframe_path, destination_keyframe_path, present_scene_desc + ) + else: + past_keyframe_path = keyframe_paths[i-1] + past_scene_desc = storyboard[i-1] + future_scene_desc = storyboard[i+1] if (i+1) < len(storyboard) else "A cena final." + decision = gemini_singleton.get_cinematic_decision( + global_prompt=global_prompt, story_history=story_history, + past_keyframe_path=past_keyframe_path, present_keyframe_path=start_keyframe_path, + future_keyframe_path=destination_keyframe_path, past_scene_desc=past_scene_desc, + present_scene_desc=present_scene_desc, future_scene_desc=future_scene_desc + ) + transition_type, motion_prompt = decision["transition_type"], decision["motion_prompt"] + + story_history += f"\n- Ato {i+1} ({transition_type}): {motion_prompt}" + + if use_continuity_director: # Assume-se que este checkbox controla os diretores de vídeo e som + if is_first_fragment: + audio_prompt = gemini_singleton.get_sound_director_prompt( + audio_history=audio_history, + past_keyframe_path=start_keyframe_path, present_keyframe_path=start_keyframe_path, + future_keyframe_path=destination_keyframe_path, present_scene_desc=present_scene_desc, + motion_prompt=motion_prompt, future_scene_desc=storyboard[i+1] if (i+1) < len(storyboard) else "The final scene." + ) + else: + audio_prompt = gemini_singleton.get_sound_director_prompt( + audio_history=audio_history, past_keyframe_path=keyframe_paths[i-1], + present_keyframe_path=start_keyframe_path, future_keyframe_path=destination_keyframe_path, + present_scene_desc=present_scene_desc, motion_prompt=motion_prompt, + future_scene_desc=storyboard[i+1] if (i+1) < len(storyboard) else "The final scene." 
+ ) + else: + audio_prompt = present_scene_desc # Fallback para o prompt da cena se o diretor de som estiver desligado + + audio_history = audio_prompt + + conditioning_items = [] + current_ltx_params = {**base_ltx_params, "handler_strength": handler_strength, "motion_prompt": motion_prompt} + total_frames_to_generate = self._quantize_to_multiple(int(seconds_per_fragment * 24), 8) + 1 + + if is_first_fragment: + img_start = self._preprocess_image_for_latent_conversion(Image.open(start_keyframe_path).convert("RGB"), target_resolution_tuple) + start_latent = self.pil_to_latent(img_start) + conditioning_items.append(LatentConditioningItem(start_latent, 0, 1.0)) + if transition_type != "cut": + img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple) + destination_latent = self.pil_to_latent(img_dest) + conditioning_items.append(LatentConditioningItem(destination_latent, total_frames_to_generate - 1, destination_convergence_strength)) + else: + previous_latents = self.load_latent_tensor(previous_latents_path) + handler_latent = previous_latents[:, :, -1:, :, :] + trimmed_for_echo = previous_latents[:, :, :-n_trim_latents, :, :] if n_trim_latents > 0 and previous_latents.shape[2] > n_trim_latents else previous_latents + echo_latents = trimmed_for_echo[:, :, -echo_frames:, :, :] + handler_frame_position = n_trim_latents + echo_frames + + conditioning_items = [] + + # Itera sobre os frames latentes do eco (echo_latents); o índice 'j' evita sombrear o 'i' do loop externo e o primeiro frame recebe peso máximo. + for j in range(echo_latents.shape[2]): + echo_latent = echo_latents[:, :, j:j+1, :, :] + weight = 1.0 if j == 0 else random.uniform(0.2, 0.7) + conditioning_items.append(LatentConditioningItem(echo_latent, 0, weight)) + #conditioning_items.append(LatentConditioningItem(echo_latents, 0, 1.0)) + conditioning_items.append(LatentConditioningItem(handler_latent, handler_frame_position, handler_strength)) + del previous_latents, handler_latent, trimmed_for_echo, echo_latents; gc.collect() + if transition_type == "continuous": + img_dest = self._preprocess_image_for_latent_conversion(Image.open(destination_keyframe_path).convert("RGB"), target_resolution_tuple) + destination_latent = self.pil_to_latent(img_dest) + conditioning_items.append(LatentConditioningItem(destination_latent, total_frames_to_generate - 1, destination_convergence_strength)) + + new_full_latents = self._generate_latent_tensor_internal(conditioning_items, current_ltx_params, target_resolution_tuple, total_frames_to_generate) + + base_name = f"fragment_{i}_{int(time.time())}" + new_full_latents_path = os.path.join(self.workspace_dir, f"{base_name}_full.pt") + self.save_latent_tensor(new_full_latents, new_full_latents_path) + + previous_latents_path = new_full_latents_path + + latents_for_video = new_full_latents + + if not is_first_fragment: + if echo_frames > 0 and latents_for_video.shape[2] > echo_frames: latents_for_video = latents_for_video[:, :, echo_frames:, :, :] + if n_trim_latents > 0 and latents_for_video.shape[2] > n_trim_latents: latents_for_video = latents_for_video[:, :, :-n_trim_latents, :, :] + else: + if n_trim_latents > 0 and latents_for_video.shape[2] > n_trim_latents: latents_for_video = latents_for_video[:, :, :-n_trim_latents, :, :] + + video_with_audio_path = self._generate_video_and_audio_from_latents(latents_for_video, audio_prompt, base_name) + video_clips_paths.append(video_with_audio_path) + + + if transition_type == "cut": + previous_latents_path = None + + + yield {"fragment_path": video_with_audio_path} + + final_movie_path = os.path.join(self.workspace_dir, 
f"final_movie_{int(time.time())}.mp4") + self.concatenate_videos_ffmpeg(video_clips_paths, final_movie_path) + + logger.info(f"Filme completo salvo em: {final_movie_path}") + yield {"final_path": final_movie_path} + + def _quantize_to_multiple(self, n, m): + if m == 0: return n + quantized = int(round(n / m) * m) + return m if n > 0 and quantized == 0 else quantized \ No newline at end of file diff --git a/dreamo/LICENSE.txt b/dreamo/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/dreamo/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/dreamo/README.md b/dreamo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..964c76ea7d615f5287e9283e23df316dad8bfdd4 --- /dev/null +++ b/dreamo/README.md @@ -0,0 +1,135 @@ +# 🛠️ helpers/ - Ferramentas de IA de Terceiros Adaptadas para ADUC-SDR + +Esta pasta contém implementações adaptadas de modelos e utilitários de IA de terceiros, que servem como "especialistas" ou "ferramentas" de baixo nível para a arquitetura ADUC-SDR. + +**IMPORTANTE:** O conteúdo desta pasta é de autoria de seus respectivos idealizadores e desenvolvedores originais. Esta pasta **NÃO FAZ PARTE** do projeto principal ADUC-SDR em termos de sua arquitetura inovadora. Ela serve como um repositório para as **dependências diretas e modificadas** que os `DeformesXDEngines` (os estágios do "foguete" ADUC-SDR) invocam para realizar tarefas específicas (geração de imagem, vídeo, áudio). + +As modificações realizadas nos arquivos aqui presentes visam principalmente: +1. **Adaptação de Interfaces:** Padronizar as interfaces para que se encaixem no fluxo de orquestração do ADUC-SDR. +2. **Gerenciamento de Recursos:** Integrar lógicas de carregamento/descarregamento de modelos (GPU management) e configurações via arquivos YAML. +3. **Otimização de Fluxo:** Ajustar as pipelines para aceitar formatos de entrada mais eficientes (ex: tensores pré-codificados em vez de caminhos de mídia, pulando etapas de codificação/decodificação redundantes). + +--- + +## 📄 Licenciamento + +O conteúdo original dos projetos listados abaixo é licenciado sob a **Licença Apache 2.0**, ou outra licença especificada pelos autores originais. Todas as modificações e o uso desses arquivos dentro da estrutura `helpers/` do projeto ADUC-SDR estão em conformidade com os termos da **Licença Apache 2.0**. + +As licenças originais dos projetos podem ser encontradas nas suas respectivas fontes ou nos subdiretórios `incl_licenses/` dentro de cada módulo adaptado. + +--- + +## 🛠️ API dos Helpers e Guia de Uso + +Esta seção detalha como cada helper (agente especialista) deve ser utilizado dentro do ecossistema ADUC-SDR. Todos os agentes são instanciados como **singletons** no `hardware_manager.py` para garantir o gerenciamento centralizado de recursos de GPU. 
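+
+Como referência de integração, o esboço abaixo ilustra o padrão geral que os módulos desta pasta seguem ao expor seus especialistas: ler a configuração em YAML, solicitar GPUs ao `hardware_manager` e publicar uma instância única no nível do módulo. É apenas um exemplo ilustrativo e simplificado (os nomes `MeuEspecialistaPoolManager` e a chave `meu_especialista` são hipotéticos); consulte cada helper para a implementação real.
+
+```python
+# Esboço ilustrativo do padrão de inicialização dos helpers (nomes hipotéticos).
+import logging
+
+import yaml
+
+from hardware_manager import hardware_manager
+
+logger = logging.getLogger(__name__)
+
+class MeuEspecialistaPoolManager:
+    """Exemplo mínimo: um pool de workers, um por GPU alocada."""
+    def __init__(self, device_ids: list[str]):
+        self.device_ids = device_ids  # GPUs reservadas pelo hardware_manager
+
+# 1. Ler a configuração central (quantas GPUs o especialista exige).
+with open("config.yaml", "r") as f:
+    config = yaml.safe_load(f)
+
+# 2. Solicitar ao hardware_manager a alocação das GPUs (chave 'meu_especialista' é hipotética).
+gpus_required = config["specialists"]["meu_especialista"]["gpus_required"]
+device_ids = hardware_manager.allocate_gpus("MeuEspecialista", gpus_required)
+
+# 3. Expor a instância singleton no nível do módulo, como fazem os helpers reais.
+meu_especialista_singleton = MeuEspecialistaPoolManager(device_ids)
+logger.info("Especialista pronto.")
+```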
+ +### **gemini_helpers.py (GeminiAgent)** + +* **Propósito:** Atua como o "Oráculo de Síntese Adaptativo", responsável por todas as tarefas de processamento de linguagem natural, como criação de storyboards, geração de prompts, e tomada de decisões narrativas. +* **Singleton Instance:** `gemini_agent_singleton` +* **Construtor:** `GeminiAgent()` + * Lê `configs/gemini_config.yaml` para obter o nome do modelo, parâmetros de inferência e caminhos de templates de prompt. A chave da API é lida da variável de ambiente `GEMINI_API_KEY`. +* **Métodos Públicos:** + * `generate_storyboard(prompt: str, num_keyframes: int, ref_image_paths: list[str])` + * **Inputs:** + * `prompt`: A ideia geral do filme (string). + * `num_keyframes`: O número de cenas a serem geradas (int). + * `ref_image_paths`: Lista de caminhos para as imagens de referência (list[str]). + * **Output:** `tuple[list[str], str]` (Uma tupla contendo a lista de strings do storyboard e um relatório textual da operação). + * `select_keyframes_from_pool(storyboard: list, base_image_paths: list[str], pool_image_paths: list[str])` + * **Inputs:** + * `storyboard`: A lista de strings do storyboard gerado. + * `base_image_paths`: Imagens de referência base (list[str]). + * `pool_image_paths`: O "banco de imagens" de onde selecionar (list[str]). + * **Output:** `tuple[list[str], str]` (Uma tupla contendo a lista de caminhos de imagens selecionadas e um relatório textual). + * `get_anticipatory_keyframe_prompt(...)` + * **Inputs:** Contexto narrativo e visual para gerar um prompt de imagem. + * **Output:** `tuple[str, str]` (Uma tupla contendo o prompt gerado para o modelo de imagem e um relatório textual). + * `get_initial_motion_prompt(...)` + * **Inputs:** Contexto narrativo e visual para a primeira transição de vídeo. + * **Output:** `tuple[str, str]` (Uma tupla contendo o prompt de movimento gerado e um relatório textual). + * `get_transition_decision(...)` + * **Inputs:** Contexto narrativo e visual para uma transição de vídeo intermediária. + * **Output:** `tuple[dict, str]` (Uma tupla contendo um dicionário `{"transition_type": "...", "motion_prompt": "..."}` e um relatório textual). + * `generate_audio_prompts(...)` + * **Inputs:** Contexto narrativo global. + * **Output:** `tuple[dict, str]` (Uma tupla contendo um dicionário `{"music_prompt": "...", "sfx_prompt": "..."}` e um relatório textual). + +### **flux_kontext_helpers.py (FluxPoolManager)** + +* **Propósito:** Especialista em geração de imagens de alta qualidade (keyframes) usando a pipeline FluxKontext. Gerencia um pool de workers para otimizar o uso de múltiplas GPUs. +* **Singleton Instance:** `flux_kontext_singleton` +* **Construtor:** `FluxPoolManager(device_ids: list[str], flux_config_file: str)` + * Lê `configs/flux_config.yaml`. +* **Método Público:** + * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int, seed: int = 42, callback: callable = None)` + * **Inputs:** + * `prompt`: Prompt textual para guiar a geração (string). + * `reference_images`: Lista de objetos `PIL.Image` como referência visual. + * `width`, `height`: Dimensões da imagem de saída (int). + * `seed`: Semente para reprodutibilidade (int). + * `callback`: Função de callback opcional para monitorar o progresso. + * **Output:** `PIL.Image.Image` (O objeto da imagem gerada). 
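+
+O esboço abaixo mostra, de forma ilustrativa, como um motor orquestrador poderia encadear os dois especialistas acima (roteiro → keyframe). Observação: nesta versão do repositório, o singleton do Gemini é exposto como `gemini_singleton` (em `gemini_helpers.py`) e os métodos `generate_storyboard`/`select_keyframes_from_pool` retornam apenas o resultado principal, sem o relatório textual descrito acima; trate as assinaturas documentadas como a interface pretendida. Os caminhos de arquivo usados abaixo são hipotéticos.
+
+```python
+# Esboço de uso (caminhos hipotéticos; requer GEMINI_API_KEY e GPUs configuradas).
+from PIL import Image
+
+from flux_kontext_helpers import flux_kontext_singleton
+from gemini_helpers import gemini_singleton
+
+ref_paths = ["refs/personagem.png", "refs/cenario.png"]  # exemplos hipotéticos
+
+# 1. Roteirista: gera a lista de descrições de cena a partir da ideia do filme.
+storyboard = gemini_singleton.generate_storyboard(
+    prompt="Um robô explora uma floresta ao amanhecer",
+    num_keyframes=4,
+    ref_image_paths=ref_paths,
+)
+
+# 2. Pintor: gera o primeiro keyframe guiado pela primeira cena do roteiro.
+keyframe = flux_kontext_singleton.generate_image(
+    reference_images=[Image.open(p) for p in ref_paths],
+    prompt=storyboard[0],
+    width=1024,
+    height=576,
+    seed=42,
+)
+keyframe.save("keyframe_01.png")
+```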
+ +### **dreamo_helpers.py (DreamOAgent)** + +* **Propósito:** Especialista em geração de imagens de alta qualidade (keyframes) usando a pipeline DreamO, com capacidades avançadas de edição e estilo a partir de referências. +* **Singleton Instance:** `dreamo_agent_singleton` +* **Construtor:** `DreamOAgent(device_id: str = None)` + * Lê `configs/dreamo_config.yaml`. +* **Método Público:** + * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int)` + * **Inputs:** + * `prompt`: Prompt textual para guiar a geração (string). + * `reference_images`: Lista de objetos `PIL.Image` como referência visual. A lógica interna atribui a primeira imagem como `style` e as demais como `ip`. + * `width`, `height`: Dimensões da imagem de saída (int). + * **Output:** `PIL.Image.Image` (O objeto da imagem gerada). + +### **ltx_manager_helpers.py (LtxPoolManager)** + +* **Propósito:** Especialista na geração de fragmentos de vídeo no espaço latente usando a pipeline LTX-Video. Gerencia um pool de workers para otimizar o uso de múltiplas GPUs. +* **Singleton Instance:** `ltx_manager_singleton` +* **Construtor:** `LtxPoolManager(device_ids: list[str], ltx_model_config_file: str, ltx_global_config_file: str)` + * Lê o `ltx_global_config_file` e o `ltx_model_config_file` para configurar a pipeline. +* **Método Público:** + * `generate_latent_fragment(**kwargs)` + * **Inputs:** Dicionário de keyword arguments (`kwargs`) contendo todos os parâmetros da pipeline LTX, incluindo: + * `height`, `width`: Dimensões do vídeo (int). + * `video_total_frames`: Número total de frames a serem gerados (int). + * `video_fps`: Frames por segundo (int). + * `motion_prompt`: Prompt de movimento (string). + * `conditioning_items_data`: Lista de objetos `LatentConditioningItem` contendo os tensores latentes de condição. + * `guidance_scale`, `stg_scale`, `num_inference_steps`, etc. + * **Output:** `tuple[torch.Tensor, tuple]` (Uma tupla contendo o tensor latente gerado e os valores de padding utilizados). + +### **mmaudio_helper.py (MMAudioAgent)** + +* **Propósito:** Especialista em geração de áudio para um determinado fragmento de vídeo. +* **Singleton Instance:** `mmaudio_agent_singleton` +* **Construtor:** `MMAudioAgent(workspace_dir: str, device_id: str = None, mmaudio_config_file: str)` + * Lê `configs/mmaudio_config.yaml`. +* **Método Público:** + * `generate_audio_for_video(video_path: str, prompt: str, negative_prompt: str, duration_seconds: float)` + * **Inputs:** + * `video_path`: Caminho para o arquivo de vídeo silencioso (string). + * `prompt`: Prompt textual para guiar a geração de áudio (string). + * `negative_prompt`: Prompt negativo para áudio (string). + * `duration_seconds`: Duração exata do vídeo (float). + * **Output:** `str` (O caminho para o novo arquivo de vídeo com a faixa de áudio integrada). + +--- + +## 🔗 Projetos Originais e Atribuições +(A seção de atribuições e licenças permanece a mesma que definimos anteriormente) + +### DreamO +* **Repositório Original:** [https://github.com/bytedance/DreamO](https://github.com/bytedance/DreamO) +... + +### LTX-Video +* **Repositório Original:** [https://github.com/Lightricks/LTX-Video](https://github.com/Lightricks/LTX-Video) +... + +### MMAudio +* **Repositório Original:** [https://github.com/hkchengrex/MMAudio](https://github.com/hkchengrex/MMAudio) +... 
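+
+Como ilustração complementar à seção de API acima, o esboço a seguir mostra como o especialista de áudio poderia ser invocado sobre um fragmento de vídeo já renderizado. Os caminhos e prompts são hipotéticos; o nome `mmaudio_agent_singleton` segue a documentação desta página e deve ser conferido no módulo `mmaudio_helper.py`.
+
+```python
+# Esboço de uso do especialista de áudio (caminhos e prompts hipotéticos).
+from mmaudio_helper import mmaudio_agent_singleton
+
+video_com_audio = mmaudio_agent_singleton.generate_audio_for_video(
+    video_path="workspace/fragmento_03_silencioso.mp4",
+    prompt="passos sobre folhas secas, vento leve na floresta",
+    negative_prompt="música, fala humana",
+    duration_seconds=4.0,
+)
+print(f"Vídeo com trilha de áudio salvo em: {video_com_audio}")
+```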
\ No newline at end of file diff --git a/dreamo/dreamo_pipeline.py b/dreamo/dreamo_pipeline.py new file mode 100644 index 0000000000000000000000000000000000000000..6b4a017a0bdb26c2d72c4173b6d6da79d2fa03fc --- /dev/null +++ b/dreamo/dreamo_pipeline.py @@ -0,0 +1,507 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Callable, Dict, List, Optional, Union + +import diffusers +import numpy as np +import torch +import torch.nn as nn +from diffusers import FluxPipeline +from diffusers.pipelines.flux.pipeline_flux import calculate_shift, retrieve_timesteps +from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput +from einops import repeat +from huggingface_hub import hf_hub_download +from safetensors.torch import load_file + +from dreamo.transformer import flux_transformer_forward +from dreamo.utils import convert_flux_lora_to_diffusers + +diffusers.models.transformers.transformer_flux.FluxTransformer2DModel.forward = flux_transformer_forward + + +def get_task_embedding_idx(task): + return 0 + + +class DreamOPipeline(FluxPipeline): + def __init__(self, scheduler, vae, text_encoder, tokenizer, text_encoder_2, tokenizer_2, transformer): + super().__init__(scheduler, vae, text_encoder, tokenizer, text_encoder_2, tokenizer_2, transformer) + self.t5_embedding = nn.Embedding(10, 4096) + self.task_embedding = nn.Embedding(2, 3072) + self.idx_embedding = nn.Embedding(10, 3072) + + def load_dreamo_model(self, device, use_turbo=True, version='v1.1'): + # download models and load file + hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo.safetensors', local_dir='models') + hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_cfg_distill.safetensors', local_dir='models') + if version == 'v1': + hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_quality_lora_pos.safetensors', + local_dir='models') + hf_hub_download(repo_id='ByteDance/DreamO', filename='dreamo_quality_lora_neg.safetensors', + local_dir='models') + quality_lora_pos = load_file('models/dreamo_quality_lora_pos.safetensors') + quality_lora_neg = load_file('models/dreamo_quality_lora_neg.safetensors') + elif version == 'v1.1': + hf_hub_download(repo_id='ByteDance/DreamO', filename='v1.1/dreamo_sft_lora.safetensors', local_dir='models') + hf_hub_download(repo_id='ByteDance/DreamO', filename='v1.1/dreamo_dpo_lora.safetensors', local_dir='models') + sft_lora = load_file('models/v1.1/dreamo_sft_lora.safetensors') + dpo_lora = load_file('models/v1.1/dreamo_dpo_lora.safetensors') + else: + raise ValueError(f'there is no {version}') + dreamo_lora = load_file('models/dreamo.safetensors') + cfg_distill_lora = load_file('models/dreamo_cfg_distill.safetensors') + + # load embedding + self.t5_embedding.weight.data = dreamo_lora.pop('dreamo_t5_embedding.weight')[-10:] + self.task_embedding.weight.data = dreamo_lora.pop('dreamo_task_embedding.weight') + 
self.idx_embedding.weight.data = dreamo_lora.pop('dreamo_idx_embedding.weight') + self._prepare_t5() + + # main lora + dreamo_diffuser_lora = convert_flux_lora_to_diffusers(dreamo_lora) + adapter_names = ['dreamo'] + adapter_weights = [1] + self.load_lora_weights(dreamo_diffuser_lora, adapter_name='dreamo') + + # cfg lora to avoid true image cfg + cfg_diffuser_lora = convert_flux_lora_to_diffusers(cfg_distill_lora) + self.load_lora_weights(cfg_diffuser_lora, adapter_name='cfg') + adapter_names.append('cfg') + adapter_weights.append(1) + + # turbo lora to speed up (from 25+ step to 12 step) + if use_turbo: + self.load_lora_weights( + hf_hub_download( + "alimama-creative/FLUX.1-Turbo-Alpha", "diffusion_pytorch_model.safetensors", local_dir='models' + ), + adapter_name='turbo', + ) + adapter_names.append('turbo') + adapter_weights.append(1) + + if version == 'v1': + # quality loras, one pos, one neg + quality_lora_pos = convert_flux_lora_to_diffusers(quality_lora_pos) + self.load_lora_weights(quality_lora_pos, adapter_name='quality_pos') + adapter_names.append('quality_pos') + adapter_weights.append(0.15) + quality_lora_neg = convert_flux_lora_to_diffusers(quality_lora_neg) + self.load_lora_weights(quality_lora_neg, adapter_name='quality_neg') + adapter_names.append('quality_neg') + adapter_weights.append(-0.8) + elif version == 'v1.1': + self.load_lora_weights(sft_lora, adapter_name='sft_lora') + adapter_names.append('sft_lora') + adapter_weights.append(1) + self.load_lora_weights(dpo_lora, adapter_name='dpo_lora') + adapter_names.append('dpo_lora') + adapter_weights.append(1.25) + + self.set_adapters(adapter_names, adapter_weights) + self.fuse_lora(adapter_names=adapter_names, lora_scale=1) + self.unload_lora_weights() + + self.t5_embedding = self.t5_embedding.to(device) + self.task_embedding = self.task_embedding.to(device) + self.idx_embedding = self.idx_embedding.to(device) + + def _prepare_t5(self): + self.text_encoder_2.resize_token_embeddings(len(self.tokenizer_2)) + num_new_token = 10 + new_token_list = [f"[ref#{i}]" for i in range(1, 10)] + ["[res]"] + self.tokenizer_2.add_tokens(new_token_list, special_tokens=False) + self.text_encoder_2.resize_token_embeddings(len(self.tokenizer_2)) + input_embedding = self.text_encoder_2.get_input_embeddings().weight.data + input_embedding[-num_new_token:] = self.t5_embedding.weight.data + + @staticmethod + def _prepare_latent_image_ids(batch_size, height, width, device, dtype, start_height=0, start_width=0): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height // 2)[:, None] + start_height + latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width // 2)[None, :] + start_width + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape + + latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1) + latent_image_ids = latent_image_ids.reshape( + batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @staticmethod + def _prepare_style_latent_image_ids(batch_size, height, width, device, dtype, start_height=0, start_width=0): + latent_image_ids = torch.zeros(height // 2, width // 2, 3) + latent_image_ids[..., 1] = latent_image_ids[..., 1] + start_height + latent_image_ids[..., 2] = latent_image_ids[..., 2] + start_width + + latent_image_id_height, latent_image_id_width, latent_image_id_channels = 
latent_image_ids.shape + + latent_image_ids = latent_image_ids[None, :].repeat(batch_size, 1, 1, 1) + latent_image_ids = latent_image_ids.reshape( + batch_size, latent_image_id_height * latent_image_id_width, latent_image_id_channels + ) + + return latent_image_ids.to(device=device, dtype=dtype) + + @torch.no_grad() + def __call__( + self, + prompt: Union[str, List[str]] = None, + prompt_2: Optional[Union[str, List[str]]] = None, + negative_prompt: Union[str, List[str]] = None, + negative_prompt_2: Optional[Union[str, List[str]]] = None, + true_cfg_scale: float = 1.0, + true_cfg_start_step: int = 1, + true_cfg_end_step: int = 1, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: int = 28, + sigmas: Optional[List[float]] = None, + guidance_scale: float = 3.5, + neg_guidance_scale: float = 3.5, + num_images_per_prompt: Optional[int] = 1, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + callback_on_step_end_tensor_inputs: List[str] = ["latents"], + max_sequence_length: int = 512, + ref_conds=None, + first_step_guidance_scale=3.5, + ): + r""" + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is + will be used instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `true_cfg_scale` is + not greater than `1`). + negative_prompt_2 (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and + `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders. + true_cfg_scale (`float`, *optional*, defaults to 1.0): + When > 1.0 and a provided `negative_prompt`, enables true classifier-free guidance. + height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The height in pixels of the generated image. This is set to 1024 by default for the best results. + width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): + The width in pixels of the generated image. This is set to 1024 by default for the best results. + num_inference_steps (`int`, *optional*, defaults to 50): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + sigmas (`List[float]`, *optional*): + Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in + their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed + will be used. 
+ guidance_scale (`float`, *optional*, defaults to 3.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. + If not provided, pooled text embeddings will be generated from `prompt` input argument. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input + argument. + negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt + weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` + input argument. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + callback_on_step_end_tensor_inputs (`List`, *optional*): + The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list + will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the + `._callback_tensor_inputs` attribute of your pipeline class. 
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`. + + Examples: + + Returns: + [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict` + is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated + images. + """ + + height = height or self.default_sample_size * self.vae_scale_factor + width = width or self.default_sample_size * self.vae_scale_factor + + # 1. Check inputs. Raise error if not correct + self.check_inputs( + prompt, + prompt_2, + height, + width, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs, + max_sequence_length=max_sequence_length, + ) + + self._guidance_scale = guidance_scale + self._joint_attention_kwargs = joint_attention_kwargs + self._current_timestep = None + self._interrupt = False + + # 2. Define call parameters + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + lora_scale = ( + self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None + ) + has_neg_prompt = negative_prompt is not None or ( + negative_prompt_embeds is not None and negative_pooled_prompt_embeds is not None + ) + do_true_cfg = true_cfg_scale > 1 and has_neg_prompt + ( + prompt_embeds, + pooled_prompt_embeds, + text_ids, + ) = self.encode_prompt( + prompt=prompt, + prompt_2=prompt_2, + prompt_embeds=prompt_embeds, + pooled_prompt_embeds=pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + if do_true_cfg: + ( + negative_prompt_embeds, + negative_pooled_prompt_embeds, + _, + ) = self.encode_prompt( + prompt=negative_prompt, + prompt_2=negative_prompt_2, + prompt_embeds=negative_prompt_embeds, + pooled_prompt_embeds=negative_pooled_prompt_embeds, + device=device, + num_images_per_prompt=num_images_per_prompt, + max_sequence_length=max_sequence_length, + lora_scale=lora_scale, + ) + + # 4. 
Prepare latent variables + num_channels_latents = self.transformer.config.in_channels // 4 + latents, latent_image_ids = self.prepare_latents( + batch_size * num_images_per_prompt, + num_channels_latents, + height, + width, + prompt_embeds.dtype, + device, + generator, + latents, + ) + + # 4.1 concat ref tokens to latent + origin_img_len = latents.shape[1] + embeddings = repeat(self.task_embedding.weight[1], "c -> n l c", n=batch_size, l=origin_img_len) + ref_latents = [] + ref_latent_image_idss = [] + start_height = height // 16 + start_width = width // 16 + for ref_cond in ref_conds: + img = ref_cond['img'] # [b, 3, h, w], range [-1, 1] + task = ref_cond['task'] + idx = ref_cond['idx'] + + # encode ref with VAE + img = img.to(latents) + ref_latent = self.vae.encode(img).latent_dist.sample() + ref_latent = (ref_latent - self.vae.config.shift_factor) * self.vae.config.scaling_factor + cur_height = ref_latent.shape[2] + cur_width = ref_latent.shape[3] + ref_latent = self._pack_latents(ref_latent, batch_size, num_channels_latents, cur_height, cur_width) + ref_latent_image_ids = self._prepare_latent_image_ids( + batch_size, cur_height, cur_width, device, prompt_embeds.dtype, start_height, start_width + ) + start_height += cur_height // 2 + start_width += cur_width // 2 + + # prepare task_idx_embedding + task_idx = get_task_embedding_idx(task) + cur_task_embedding = repeat( + self.task_embedding.weight[task_idx], "c -> n l c", n=batch_size, l=ref_latent.shape[1] + ) + cur_idx_embedding = repeat( + self.idx_embedding.weight[idx], "c -> n l c", n=batch_size, l=ref_latent.shape[1] + ) + cur_embedding = cur_task_embedding + cur_idx_embedding + + # concat ref to latent + embeddings = torch.cat([embeddings, cur_embedding], dim=1) + ref_latents.append(ref_latent) + ref_latent_image_idss.append(ref_latent_image_ids) + + # 5. Prepare timesteps + sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas + image_seq_len = latents.shape[1] + mu = calculate_shift( + image_seq_len, + self.scheduler.config.get("base_image_seq_len", 256), + self.scheduler.config.get("max_image_seq_len", 4096), + self.scheduler.config.get("base_shift", 0.5), + self.scheduler.config.get("max_shift", 1.15), + ) + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + sigmas=sigmas, + mu=mu, + ) + num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0) + self._num_timesteps = len(timesteps) + + # handle guidance + if self.transformer.config.guidance_embeds: + guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32) + guidance = guidance.expand(latents.shape[0]) + else: + guidance = None + neg_guidance = torch.full([1], neg_guidance_scale, device=device, dtype=torch.float32) + neg_guidance = neg_guidance.expand(latents.shape[0]) + first_step_guidance = torch.full([1], first_step_guidance_scale, device=device, dtype=torch.float32) + + if self.joint_attention_kwargs is None: + self._joint_attention_kwargs = {} + + # 6. 
Denoising loop + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + if self.interrupt: + continue + + self._current_timestep = t + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + timestep = t.expand(latents.shape[0]).to(latents.dtype) + + noise_pred = self.transformer( + hidden_states=torch.cat((latents, *ref_latents), dim=1), + timestep=timestep / 1000, + guidance=guidance if i > 0 else first_step_guidance, + pooled_projections=pooled_prompt_embeds, + encoder_hidden_states=prompt_embeds, + txt_ids=text_ids, + img_ids=torch.cat((latent_image_ids, *ref_latent_image_idss), dim=1), + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + embeddings=embeddings, + )[0][:, :origin_img_len] + + if do_true_cfg and i >= true_cfg_start_step and i < true_cfg_end_step: + neg_noise_pred = self.transformer( + hidden_states=latents, + timestep=timestep / 1000, + guidance=neg_guidance, + pooled_projections=negative_pooled_prompt_embeds, + encoder_hidden_states=negative_prompt_embeds, + txt_ids=text_ids, + img_ids=latent_image_ids, + joint_attention_kwargs=self.joint_attention_kwargs, + return_dict=False, + )[0] + noise_pred = neg_noise_pred + true_cfg_scale * (noise_pred - neg_noise_pred) + + # compute the previous noisy sample x_t -> x_t-1 + latents_dtype = latents.dtype + latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0] + + if latents.dtype != latents_dtype and torch.backends.mps.is_available(): + # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272 + latents = latents.to(latents_dtype) + + if callback_on_step_end is not None: + callback_kwargs = {} + for k in callback_on_step_end_tensor_inputs: + callback_kwargs[k] = locals()[k] + callback_outputs = callback_on_step_end(self, i, t, callback_kwargs) + + latents = callback_outputs.pop("latents", latents) + prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds) + + # call the callback, if provided + if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): + progress_bar.update() + + self._current_timestep = None + + if output_type == "latent": + image = latents + else: + latents = self._unpack_latents(latents, height, width, self.vae_scale_factor) + latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor + image = self.vae.decode(latents, return_dict=False)[0] + image = self.image_processor.postprocess(image, output_type=output_type) + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return FluxPipelineOutput(images=image) diff --git a/dreamo/transformer.py b/dreamo/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..6fcdb05892d32ffc376e66d331577dcfd0d2a511 --- /dev/null +++ b/dreamo/transformer.py @@ -0,0 +1,187 @@ +# Copyright (c) 2025 Bytedance Ltd. and/or its affiliates +# Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from typing import Any, Dict, Optional, Union + +import numpy as np +import torch +from diffusers.models.modeling_outputs import Transformer2DModelOutput +from diffusers.utils import ( + USE_PEFT_BACKEND, + logging, + scale_lora_layers, + unscale_lora_layers, +) + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +def flux_transformer_forward( + self, + hidden_states: torch.Tensor, + encoder_hidden_states: torch.Tensor = None, + pooled_projections: torch.Tensor = None, + timestep: torch.LongTensor = None, + img_ids: torch.Tensor = None, + txt_ids: torch.Tensor = None, + guidance: torch.Tensor = None, + joint_attention_kwargs: Optional[Dict[str, Any]] = None, + controlnet_block_samples=None, + controlnet_single_block_samples=None, + return_dict: bool = True, + controlnet_blocks_repeat: bool = False, + embeddings: torch.Tensor = None, +) -> Union[torch.Tensor, Transformer2DModelOutput]: + """ + The [`FluxTransformer2DModel`] forward method. + + Args: + hidden_states (`torch.Tensor` of shape `(batch_size, image_sequence_length, in_channels)`): + Input `hidden_states`. + encoder_hidden_states (`torch.Tensor` of shape `(batch_size, text_sequence_length, joint_attention_dim)`): + Conditional embeddings (embeddings computed from the input conditions such as prompts) to use. + pooled_projections (`torch.Tensor` of shape `(batch_size, projection_dim)`): Embeddings projected + from the embeddings of input conditions. + timestep ( `torch.LongTensor`): + Used to indicate denoising step. + block_controlnet_hidden_states: (`list` of `torch.Tensor`): + A list of tensors that if specified are added to the residuals of transformer blocks. + joint_attention_kwargs (`dict`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.transformer_2d.Transformer2DModelOutput`] instead of a plain + tuple. + + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + if joint_attention_kwargs is not None: + joint_attention_kwargs = joint_attention_kwargs.copy() + lora_scale = joint_attention_kwargs.pop("scale", 1.0) + else: + lora_scale = 1.0 + + if USE_PEFT_BACKEND: + # weight the lora layers by setting `lora_scale` for each PEFT layer + scale_lora_layers(self, lora_scale) + else: + if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None: + logger.warning( + "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective." 
+ ) + + hidden_states = self.x_embedder(hidden_states) + # add task and idx embedding + if embeddings is not None: + hidden_states = hidden_states + embeddings + + timestep = timestep.to(hidden_states.dtype) * 1000 + guidance = guidance.to(hidden_states.dtype) * 1000 if guidance is not None else None + + temb = ( + self.time_text_embed(timestep, pooled_projections) + if guidance is None + else self.time_text_embed(timestep, guidance, pooled_projections) + ) + encoder_hidden_states = self.context_embedder(encoder_hidden_states) + + if txt_ids.ndim == 3: + # logger.warning( + # "Passing `txt_ids` 3d torch.Tensor is deprecated." + # "Please remove the batch dimension and pass it as a 2d torch Tensor" + # ) + txt_ids = txt_ids[0] + if img_ids.ndim == 3: + # logger.warning( + # "Passing `img_ids` 3d torch.Tensor is deprecated." + # "Please remove the batch dimension and pass it as a 2d torch Tensor" + # ) + img_ids = img_ids[0] + + ids = torch.cat((txt_ids, img_ids), dim=0) + image_rotary_emb = self.pos_embed(ids) + + for index_block, block in enumerate(self.transformer_blocks): + if torch.is_grad_enabled() and self.gradient_checkpointing: + encoder_hidden_states, hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + encoder_hidden_states, + temb, + image_rotary_emb, + ) + + else: + encoder_hidden_states, hidden_states = block( + hidden_states=hidden_states, + encoder_hidden_states=encoder_hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_block_samples is not None: + interval_control = len(self.transformer_blocks) / len(controlnet_block_samples) + interval_control = int(np.ceil(interval_control)) + # For Xlabs ControlNet. + if controlnet_blocks_repeat: + hidden_states = hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)] + else: + hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control] + hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1) + + for index_block, block in enumerate(self.single_transformer_blocks): + if torch.is_grad_enabled() and self.gradient_checkpointing: + hidden_states = self._gradient_checkpointing_func( + block, + hidden_states, + temb, + image_rotary_emb, + ) + + else: + hidden_states = block( + hidden_states=hidden_states, + temb=temb, + image_rotary_emb=image_rotary_emb, + joint_attention_kwargs=joint_attention_kwargs, + ) + + # controlnet residual + if controlnet_single_block_samples is not None: + interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples) + interval_control = int(np.ceil(interval_control)) + hidden_states[:, encoder_hidden_states.shape[1] :, ...] = ( + hidden_states[:, encoder_hidden_states.shape[1] :, ...] + + controlnet_single_block_samples[index_block // interval_control] + ) + + hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...] + + hidden_states = self.norm_out(hidden_states, temb) + output = self.proj_out(hidden_states) + + if USE_PEFT_BACKEND: + # remove `lora_scale` from each PEFT layer + unscale_lora_layers(self, lora_scale) + + if not return_dict: + return (output,) + + return Transformer2DModelOutput(sample=output) diff --git a/dreamo/utils.py b/dreamo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f2090852bf8371aa758c2c443ba3fc112055d67f --- /dev/null +++ b/dreamo/utils.py @@ -0,0 +1,232 @@ +# Copyright (c) 2025 Bytedance Ltd. 
and/or its affiliates +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import math +import re + +import cv2 +import numpy as np +import torch +from torchvision.utils import make_grid + + +# from basicsr +def img2tensor(imgs, bgr2rgb=True, float32=True): + """Numpy array to tensor. + + Args: + imgs (list[ndarray] | ndarray): Input images. + bgr2rgb (bool): Whether to change bgr to rgb. + float32 (bool): Whether to change to float32. + + Returns: + list[tensor] | tensor: Tensor images. If returned results only have + one element, just return tensor. + """ + + def _totensor(img, bgr2rgb, float32): + if img.shape[2] == 3 and bgr2rgb: + if img.dtype == 'float64': + img = img.astype('float32') + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + img = torch.from_numpy(img.transpose(2, 0, 1)) + if float32: + img = img.float() + return img + + if isinstance(imgs, list): + return [_totensor(img, bgr2rgb, float32) for img in imgs] + return _totensor(imgs, bgr2rgb, float32) + + +def tensor2img(tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1)): + """Convert torch Tensors into image numpy arrays. + + After clamping to [min, max], values will be normalized to [0, 1]. + + Args: + tensor (Tensor or list[Tensor]): Accept shapes: + 1) 4D mini-batch Tensor of shape (B x 3/1 x H x W); + 2) 3D Tensor of shape (3/1 x H x W); + 3) 2D Tensor of shape (H x W). + Tensor channel should be in RGB order. + rgb2bgr (bool): Whether to change rgb to bgr. + out_type (numpy type): output types. If ``np.uint8``, transform outputs + to uint8 type with range [0, 255]; otherwise, float type with + range [0, 1]. Default: ``np.uint8``. + min_max (tuple[int]): min and max values for clamp. + + Returns: + (Tensor or list): 3D ndarray of shape (H x W x C) OR 2D ndarray of + shape (H x W). The channel order is BGR. + """ + if not (torch.is_tensor(tensor) or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') + + if torch.is_tensor(tensor): + tensor = [tensor] + result = [] + for _tensor in tensor: + _tensor = _tensor.squeeze(0).float().detach().cpu().clamp_(*min_max) + _tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0]) + + n_dim = _tensor.dim() + if n_dim == 4: + img_np = make_grid(_tensor, nrow=int(math.sqrt(_tensor.size(0))), normalize=False).numpy() + img_np = img_np.transpose(1, 2, 0) + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 3: + img_np = _tensor.numpy() + img_np = img_np.transpose(1, 2, 0) + if img_np.shape[2] == 1: # gray image + img_np = np.squeeze(img_np, axis=2) + else: + if rgb2bgr: + img_np = cv2.cvtColor(img_np, cv2.COLOR_RGB2BGR) + elif n_dim == 2: + img_np = _tensor.numpy() + else: + raise TypeError(f'Only support 4D, 3D or 2D tensor. But received with dimension: {n_dim}') + if out_type == np.uint8: + # Unlike MATLAB, numpy.unit8() WILL NOT round by default. 
+ img_np = (img_np * 255.0).round() + img_np = img_np.astype(out_type) + result.append(img_np) + if len(result) == 1: + result = result[0] + return result + + +def resize_numpy_image_area(image, area=512 * 512): + h, w = image.shape[:2] + k = math.sqrt(area / (h * w)) + h = int(h * k) - (int(h * k) % 16) + w = int(w * k) - (int(w * k) % 16) + image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA) + return image + +def resize_numpy_image_long(image, long_edge=768): + h, w = image.shape[:2] + if max(h, w) <= long_edge: + return image + k = long_edge / max(h, w) + h = int(h * k) + w = int(w * k) + image = cv2.resize(image, (w, h), interpolation=cv2.INTER_AREA) + return image + + +# reference: https://github.com/huggingface/diffusers/pull/9295/files +def convert_flux_lora_to_diffusers(old_state_dict): + new_state_dict = {} + orig_keys = list(old_state_dict.keys()) + + def handle_qkv(sds_sd, ait_sd, sds_key, ait_keys, dims=None): + down_weight = sds_sd.pop(sds_key) + up_weight = sds_sd.pop(sds_key.replace(".down.weight", ".up.weight")) + + # calculate dims if not provided + num_splits = len(ait_keys) + if dims is None: + dims = [up_weight.shape[0] // num_splits] * num_splits + else: + assert sum(dims) == up_weight.shape[0] + + # make ai-toolkit weight + ait_down_keys = [k + ".lora_A.weight" for k in ait_keys] + ait_up_keys = [k + ".lora_B.weight" for k in ait_keys] + + # down_weight is copied to each split + ait_sd.update({k: down_weight for k in ait_down_keys}) + + # up_weight is split to each split + ait_sd.update({k: v for k, v in zip(ait_up_keys, torch.split(up_weight, dims, dim=0))}) # noqa: C416 + + for old_key in orig_keys: + # Handle double_blocks + if 'double_blocks' in old_key: + block_num = re.search(r"double_blocks_(\d+)", old_key).group(1) + new_key = f"transformer.transformer_blocks.{block_num}" + + if "proj_lora1" in old_key: + new_key += ".attn.to_out.0" + elif "proj_lora2" in old_key: + new_key += ".attn.to_add_out" + elif "qkv_lora2" in old_key and "up" not in old_key: + handle_qkv( + old_state_dict, + new_state_dict, + old_key, + [ + f"transformer.transformer_blocks.{block_num}.attn.add_q_proj", + f"transformer.transformer_blocks.{block_num}.attn.add_k_proj", + f"transformer.transformer_blocks.{block_num}.attn.add_v_proj", + ], + ) + # continue + elif "qkv_lora1" in old_key and "up" not in old_key: + handle_qkv( + old_state_dict, + new_state_dict, + old_key, + [ + f"transformer.transformer_blocks.{block_num}.attn.to_q", + f"transformer.transformer_blocks.{block_num}.attn.to_k", + f"transformer.transformer_blocks.{block_num}.attn.to_v", + ], + ) + # continue + + if "down" in old_key: + new_key += ".lora_A.weight" + elif "up" in old_key: + new_key += ".lora_B.weight" + + # Handle single_blocks + elif 'single_blocks' in old_key: + block_num = re.search(r"single_blocks_(\d+)", old_key).group(1) + new_key = f"transformer.single_transformer_blocks.{block_num}" + + if "proj_lora" in old_key: + new_key += ".proj_out" + elif "qkv_lora" in old_key and "up" not in old_key: + handle_qkv( + old_state_dict, + new_state_dict, + old_key, + [ + f"transformer.single_transformer_blocks.{block_num}.attn.to_q", + f"transformer.single_transformer_blocks.{block_num}.attn.to_k", + f"transformer.single_transformer_blocks.{block_num}.attn.to_v", + ], + ) + + if "down" in old_key: + new_key += ".lora_A.weight" + elif "up" in old_key: + new_key += ".lora_B.weight" + + else: + # Handle other potential key patterns here + new_key = old_key + + # Since we already handle qkv above. 
+ if "qkv" not in old_key and 'embedding' not in old_key: + new_state_dict[new_key] = old_state_dict.pop(old_key) + + # if len(old_state_dict) > 0: + # raise ValueError(f"`old_state_dict` should be at this point but has: {list(old_state_dict.keys())}.") + + return new_state_dict diff --git a/flux_kontext_helpers.py b/flux_kontext_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..b008f711cdb721becb466eef9b779f6ae75f1c94 --- /dev/null +++ b/flux_kontext_helpers.py @@ -0,0 +1,151 @@ +# flux_kontext_helpers.py (ADUC: O Especialista Pintor - com suporte a callback) +# Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos + +import torch +from PIL import Image, ImageOps +import gc +from diffusers import FluxKontextPipeline +import huggingface_hub +import os +import threading +import yaml +import logging + +from hardware_manager import hardware_manager + +logger = logging.getLogger(__name__) + +class FluxWorker: + """Representa uma única instância do pipeline FluxKontext em um dispositivo.""" + def __init__(self, device_id='cuda:0'): + self.cpu_device = torch.device('cpu') + self.device = torch.device(device_id if torch.cuda.is_available() else 'cpu') + self.pipe = None + self._load_pipe_to_cpu() + + def _load_pipe_to_cpu(self): + if self.pipe is None: + logger.info(f"FLUX Worker ({self.device}): Carregando modelo para a CPU...") + self.pipe = FluxKontextPipeline.from_pretrained( + "black-forest-labs/FLUX.1-Kontext-dev", torch_dtype=torch.bfloat16 + ).to(self.cpu_device) + logger.info(f"FLUX Worker ({self.device}): Modelo pronto na CPU.") + + def to_gpu(self): + if self.device.type == 'cpu': return + logger.info(f"FLUX Worker: Movendo modelo para a GPU {self.device}...") + self.pipe.to(self.device) + + def to_cpu(self): + if self.device.type == 'cpu': return + logger.info(f"FLUX Worker: Descarregando modelo da GPU {self.device}...") + self.pipe.to(self.cpu_device) + gc.collect() + if torch.cuda.is_available(): torch.cuda.empty_cache() + + def _create_composite_reference(self, images: list[Image.Image], target_width: int, target_height: int) -> Image.Image: + if not images: return None + valid_images = [img.convert("RGB") for img in images if img is not None] + if not valid_images: return None + if len(valid_images) == 1: + if valid_images[0].size != (target_width, target_height): + return ImageOps.fit(valid_images[0], (target_width, target_height), Image.Resampling.LANCZOS) + return valid_images[0] + + base_height = valid_images[0].height + resized_for_concat = [] + for img in valid_images: + if img.height != base_height: + aspect_ratio = img.width / img.height + new_width = int(base_height * aspect_ratio) + resized_for_concat.append(img.resize((new_width, base_height), Image.Resampling.LANCZOS)) + else: + resized_for_concat.append(img) + + total_width = sum(img.width for img in resized_for_concat) + concatenated = Image.new('RGB', (total_width, base_height)) + x_offset = 0 + for img in resized_for_concat: + concatenated.paste(img, (x_offset, 0)) + x_offset += img.width + + final_reference = ImageOps.fit(concatenated, (target_width, target_height), Image.Resampling.LANCZOS) + return final_reference + + @torch.inference_mode() + def generate_image_internal(self, reference_images: list[Image.Image], prompt: str, target_width: int, target_height: int, seed: int, callback: callable = None): + composite_reference = self._create_composite_reference(reference_images, target_width, target_height) + + num_steps = 30 # Valor fixo otimizado + + logger.info(f"\n===== 
[CHAMADA AO PIPELINE FLUX em {self.device}] =====\n" + f" - Prompt: '{prompt}'\n" + f" - Resolução: {target_width}x{target_height}, Seed: {seed}, Passos: {num_steps}\n" + f" - Nº de Imagens na Composição: {len(reference_images)}\n" + f"==========================================") + + generated_image = self.pipe( + image=composite_reference, + prompt=prompt, + guidance_scale=2.5, + width=target_width, + height=target_height, + num_inference_steps=num_steps, + generator=torch.Generator(device="cpu").manual_seed(seed), + callback_on_step_end=callback, + callback_on_step_end_tensor_inputs=["latents"] if callback else None + ).images[0] + + return generated_image + +class FluxPoolManager: + def __init__(self, device_ids): + logger.info(f"FLUX POOL MANAGER: Criando workers para os dispositivos: {device_ids}") + self.workers = [FluxWorker(device_id) for device_id in device_ids] + self.current_worker_index = 0 + self.lock = threading.Lock() + self.last_cleanup_thread = None + + def _cleanup_worker_thread(self, worker): + logger.info(f"FLUX CLEANUP THREAD: Iniciando limpeza de {worker.device} em background...") + worker.to_cpu() + + def generate_image(self, reference_images, prompt, width, height, seed=42, callback=None): + worker_to_use = None + try: + with self.lock: + if self.last_cleanup_thread and self.last_cleanup_thread.is_alive(): + self.last_cleanup_thread.join() + worker_to_use = self.workers[self.current_worker_index] + previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers) + worker_to_cleanup = self.workers[previous_worker_index] + cleanup_thread = threading.Thread(target=self._cleanup_worker_thread, args=(worker_to_cleanup,)) + cleanup_thread.start() + self.last_cleanup_thread = cleanup_thread + worker_to_use.to_gpu() + self.current_worker_index = (self.current_worker_index + 1) % len(self.workers) + + logger.info(f"FLUX POOL MANAGER: Gerando imagem em {worker_to_use.device}...") + return worker_to_use.generate_image_internal( + reference_images=reference_images, + prompt=prompt, + target_width=width, + target_height=height, + seed=seed, + callback=callback + ) + except Exception as e: + logger.error(f"FLUX POOL MANAGER: Erro durante a geração: {e}", exc_info=True) + raise e + finally: + pass + +# --- Instanciação Singleton Dinâmica --- +logger.info("Lendo config.yaml para inicializar o FluxKontext Pool Manager...") +with open("config.yaml", 'r') as f: config = yaml.safe_load(f) +hf_token = os.getenv('HF_TOKEN'); +if hf_token: huggingface_hub.login(token=hf_token) +flux_gpus_required = config['specialists']['flux']['gpus_required'] +flux_device_ids = hardware_manager.allocate_gpus('Flux', flux_gpus_required) +flux_kontext_singleton = FluxPoolManager(device_ids=flux_device_ids) +logger.info("Especialista de Imagem (Flux) pronto.") \ No newline at end of file diff --git a/gemini_helpers.py b/gemini_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..470c8319513179e7858de7952ecc4fcecb61760f --- /dev/null +++ b/gemini_helpers.py @@ -0,0 +1,257 @@ +# gemini_helpers.py +# Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos +# +# Este programa é software livre: você pode redistribuí-lo e/ou modificá-lo +# sob os termos da Licença Pública Geral Affero GNU como publicada pela +# Free Software Foundation, seja a versão 3 da Licença, ou +# (a seu critério) qualquer versão posterior. +# +# AVISO DE PATENTE PENDENTE: O método e sistema ADUC implementado neste +# software está em processo de patenteamento. 
Consulte NOTICE.md. + +import os +import logging +import json +import gradio as gr +from PIL import Image +import google.generativeai as genai +import re + +logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s') +logger = logging.getLogger(__name__) + +def robust_json_parser(raw_text: str) -> dict: + clean_text = raw_text.strip() + try: + # Tenta encontrar o JSON delimitado por ```json ... ``` + match = re.search(r'```json\s*(\{.*?\})\s*```', clean_text, re.DOTALL) + if match: + json_str = match.group(1) + return json.loads(json_str) + + # Se não encontrar, tenta encontrar o primeiro '{' e o último '}' + start_index = clean_text.find('{') + end_index = clean_text.rfind('}') + if start_index != -1 and end_index != -1 and end_index > start_index: + json_str = clean_text[start_index : end_index + 1] + return json.loads(json_str) + else: + raise ValueError("Nenhum objeto JSON válido foi encontrado na resposta da IA.") + except json.JSONDecodeError as e: + logger.error(f"Falha ao decodificar JSON. A IA retornou o seguinte texto:\n---\n{raw_text}\n---") + raise ValueError(f"A IA retornou um formato de JSON inválido: {e}") + +class GeminiSingleton: + def __init__(self): + self.api_key = os.environ.get("GEMINI_API_KEY") + if self.api_key: + genai.configure(api_key=self.api_key) + # Modelo mais recente e capaz para tarefas complexas de visão e raciocínio. + self.model = genai.GenerativeModel('gemini-2.5-flash') + logger.info("Especialista Gemini (1.5 Pro) inicializado com sucesso.") + else: + self.model = None + logger.warning("Chave da API Gemini não encontrada. Especialista desativado.") + + def _check_model(self): + if not self.model: + raise gr.Error("A chave da API do Google Gemini não está configurada (GEMINI_API_KEY).") + + def _read_prompt_template(self, filename: str) -> str: + try: + with open(os.path.join("prompts", filename), "r", encoding="utf-8") as f: + return f.read() + except FileNotFoundError: + raise gr.Error(f"Arquivo de prompt não encontrado: prompts/{filename}") + + def generate_storyboard(self, prompt: str, num_keyframes: int, ref_image_paths: list[str]) -> list[str]: + self._check_model() + try: + template = self._read_prompt_template("unified_storyboard_prompt.txt") + storyboard_prompt = template.format(user_prompt=prompt, num_fragments=num_keyframes) + model_contents = [storyboard_prompt] + [Image.open(p) for p in ref_image_paths] + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (generate_storyboard) ---\n{response.text}\n--------------------") + + storyboard_data = robust_json_parser(response.text) + storyboard = storyboard_data.get("scene_storyboard", []) + if not storyboard or len(storyboard) != num_keyframes: raise ValueError(f"Número incorreto de cenas gerado.") + return storyboard + except Exception as e: + raise gr.Error(f"O Roteirista (Gemini) falhou: {e}") + + def select_keyframes_from_pool(self, storyboard: list, base_image_paths: list[str], pool_image_paths: list[str]) -> list[str]: + self._check_model() + if not pool_image_paths: + raise gr.Error("O 'banco de imagens' (Imagens Adicionais) está vazio.") + + try: + template = self._read_prompt_template("keyframe_selection_prompt.txt") + + image_map = {f"IMG-{i+1}": path for i, path in enumerate(pool_image_paths)} + base_image_map = {f"BASE-{i+1}": path for i, path in enumerate(base_image_paths)} + + model_contents = ["# Reference Images (Story Base)"] + for identifier, path in base_image_map.items(): + 
model_contents.extend([f"Identifier: {identifier}", Image.open(path)]) + + model_contents.append("\n# Image Pool (Scene Bank)") + for identifier, path in image_map.items(): + model_contents.extend([f"Identifier: {identifier}", Image.open(path)]) + + storyboard_str = "\n".join([f"- Scene {i+1}: {s}" for i, s in enumerate(storyboard)]) + selection_prompt = template.format(storyboard_str=storyboard_str, image_identifiers=list(image_map.keys())) + model_contents.append(selection_prompt) + + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (select_keyframes_from_pool) ---\n{response.text}\n--------------------") + + selection_data = robust_json_parser(response.text) + selected_identifiers = selection_data.get("selected_image_identifiers", []) + + if len(selected_identifiers) != len(storyboard): + raise ValueError("A IA não selecionou o número correto de imagens para as cenas.") + + selected_paths = [image_map[identifier] for identifier in selected_identifiers] + return selected_paths + + except Exception as e: + raise gr.Error(f"O Fotógrafo (Gemini) falhou ao selecionar as imagens: {e}") + + def get_anticipatory_keyframe_prompt(self, global_prompt: str, scene_history: str, current_scene_desc: str, future_scene_desc: str, last_image_path: str, fixed_ref_paths: list[str]) -> str: + self._check_model() + try: + template = self._read_prompt_template("anticipatory_keyframe_prompt.txt") + + director_prompt = template.format( + historico_prompt=scene_history, + cena_atual=current_scene_desc, + cena_futura=future_scene_desc + ) + + model_contents = [ + "# CONTEXTO:", + f"- Global Story Goal: {global_prompt}", + "# VISUAL ASSETS:", + "Current Base Image [IMG-BASE]:", + Image.open(last_image_path) + ] + + ref_counter = 1 + for path in fixed_ref_paths: + if path != last_image_path: + model_contents.extend([f"General Reference Image [IMG-REF-{ref_counter}]:", Image.open(path)]) + ref_counter += 1 + + model_contents.append(director_prompt) + + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (get_anticipatory_keyframe_prompt) ---\n{response.text}\n--------------------") + + final_flux_prompt = response.text.strip() + return final_flux_prompt + except Exception as e: + raise gr.Error(f"O Diretor de Arte (Gemini) falhou: {e}") + + def get_initial_motion_prompt(self, user_prompt: str, start_image_path: str, destination_image_path: str, dest_scene_desc: str) -> str: + """Gera o prompt de movimento para a PRIMEIRA transição, que não tem um 'passado'.""" + self._check_model() + try: + template = self._read_prompt_template("initial_motion_prompt.txt") + prompt_text = template.format(user_prompt=user_prompt, destination_scene_description=dest_scene_desc) + model_contents = [ + prompt_text, + "START Image:", + Image.open(start_image_path), + "DESTINATION Image:", + Image.open(destination_image_path) + ] + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (get_initial_motion_prompt) ---\n{response.text}\n--------------------") + + return response.text.strip() + except Exception as e: + raise gr.Error(f"O Cineasta Inicial (Gemini) falhou: {e}") + + def get_cinematic_decision(self, global_prompt: str, story_history: str, + past_keyframe_path: str, present_keyframe_path: str, future_keyframe_path: str, + past_scene_desc: str, present_scene_desc: str, future_scene_desc: str) -> dict: + """ + Atua como um 'Cineasta', analisando passado, presente e futuro 
para tomar decisões + de edição e gerar prompts de movimento detalhados. + """ + self._check_model() + try: + template = self._read_prompt_template("cinematic_director_prompt.txt") + prompt_text = template.format( + global_prompt=global_prompt, + story_history=story_history, + past_scene_desc=past_scene_desc, + present_scene_desc=present_scene_desc, + future_scene_desc=future_scene_desc + ) + + model_contents = [ + prompt_text, + "[PAST_IMAGE]:", Image.open(past_keyframe_path), + "[PRESENT_IMAGE]:", Image.open(present_keyframe_path), + "[FUTURE_IMAGE]:", Image.open(future_keyframe_path) + ] + + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (get_cinematic_decision) ---\n{response.text}\n--------------------") + + decision_data = robust_json_parser(response.text) + if "transition_type" not in decision_data or "motion_prompt" not in decision_data: + raise ValueError("Resposta da IA (Cineasta) está mal formatada. Faltam 'transition_type' ou 'motion_prompt'.") + return decision_data + except Exception as e: + # Fallback para uma decisão segura em caso de erro + logger.error(f"O Diretor de Cinema (Gemini) falhou: {e}. Usando fallback para 'continuous'.") + return { + "transition_type": "continuous", + "motion_prompt": f"A smooth, continuous cinematic transition from '{present_scene_desc}' to '{future_scene_desc}'." + } + + + + def get_sound_director_prompt(self, audio_history: str, + past_keyframe_path: str, present_keyframe_path: str, future_keyframe_path: str, + present_scene_desc: str, motion_prompt: str, future_scene_desc: str) -> str: + """ + Atua como um 'Diretor de Som', analisando o contexto completo para criar um prompt + de áudio imersivo e contínuo para a cena atual. + """ + self._check_model() + try: + template = self._read_prompt_template("sound_director_prompt.txt") + prompt_text = template.format( + audio_history=audio_history, + present_scene_desc=present_scene_desc, + motion_prompt=motion_prompt, + future_scene_desc=future_scene_desc + ) + + model_contents = [ + prompt_text, + "[PAST_IMAGE]:", Image.open(past_keyframe_path), + "[PRESENT_IMAGE]:", Image.open(present_keyframe_path), + "[FUTURE_IMAGE]:", Image.open(future_keyframe_path) + ] + + response = self.model.generate_content(model_contents) + + logger.info(f"--- RESPOSTA COMPLETA DO GEMINI (get_sound_director_prompt) ---\n{response.text}\n--------------------") + + return response.text.strip() + except Exception as e: + logger.error(f"O Diretor de Som (Gemini) falhou: {e}. Usando fallback.") + return f"Sound effects matching the scene: {present_scene_desc}" + + +gemini_singleton = GeminiSingleton() \ No newline at end of file diff --git a/hardware_manager.py b/hardware_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..f5c19870ee2f9da766ecf8dd441debf3784d6ff8 --- /dev/null +++ b/hardware_manager.py @@ -0,0 +1,35 @@ +# hardware_manager.py +# Gerencia a detecção e alocação de GPUs para os especialistas. 
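+# Allocation is exclusive: a GPU handed to one specialist is never reassigned during the
+# process lifetime. With no GPUs available (or zero requested) the specialist is mapped to
+# CPU; if GPUs exist but too few are free, allocate_gpus raises RuntimeError.
+# Illustrative call (mirrors the specialist modules; the values are examples only):
+#   devices = hardware_manager.allocate_gpus('Flux', 1)   # -> e.g. ['cuda:0'] or ['cpu']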
+# Copyright (C) 2025 Carlos Rodrigues dos Santos + +import torch +import logging + +logger = logging.getLogger(__name__) + +class HardwareManager: + def __init__(self): + self.gpus = [] + self.allocated_gpus = set() + if torch.cuda.is_available(): + self.gpus = [f'cuda:{i}' for i in range(torch.cuda.device_count())] + logger.info(f"Hardware Manager: Encontradas {len(self.gpus)} GPUs disponíveis: {self.gpus}") + + def allocate_gpus(self, specialist_name: str, num_required: int) -> list[str]: + if not self.gpus or num_required == 0: + logger.warning(f"Nenhuma GPU disponível ou solicitada para '{specialist_name}'. Alocando para CPU.") + return ['cpu'] + + available_gpus = [gpu for gpu in self.gpus if gpu not in self.allocated_gpus] + + if len(available_gpus) < num_required: + error_msg = f"Recursos de GPU insuficientes para '{specialist_name}'. Solicitado: {num_required}, Disponível: {len(available_gpus)}." + logger.error(error_msg) + raise RuntimeError(error_msg) + + allocated = available_gpus[:num_required] + self.allocated_gpus.update(allocated) + logger.info(f"Hardware Manager: Alocando GPUs {allocated} para o especialista '{specialist_name}'.") + return allocated + +hardware_manager = HardwareManager() \ No newline at end of file diff --git a/i18n.json b/i18n.json new file mode 100644 index 0000000000000000000000000000000000000000..a5a2baae609d72447899ecd55f2deec524c2bbf9 --- /dev/null +++ b/i18n.json @@ -0,0 +1,128 @@ +{ + "pt": { + "app_title": "ADUC-SDR 🎬 - O Diretor de Cinema IA", + "app_subtitle": "Crie um filme completo com vídeo e áudio, orquestrado por uma equipe de IAs.", + "lang_selector_label": "Idioma / Language", + "step1_accordion": "Etapa 1: Roteiro e Cenas-Chave", + "prompt_label": "Ideia Geral do Filme", + "ref_images_base_label": "Imagens de Referência (Base da História)", + "ref_images_extra_label": "Imagens Adicionais (Banco de Cenas para o Modo Fotógrafo)", + "keyframes_label": "Número de Cenas-Chave", + "storyboard_button": "1. Gerar Roteiro", + "storyboard_and_keyframes_button": "1A. Gerar Roteiro e Keyframes (Modo Diretor de Arte)", + "storyboard_from_photos_button": "1B. Gerar Roteiro a partir de Fotos (Modo Fotógrafo)", + "step1_mode_b_info": "Modo Fotógrafo: As 'Imagens Adicionais' são usadas como um banco de cenas e a IA escolherá a melhor para cada parte do roteiro.", + "storyboard_output_label": "Roteiro Gerado (Storyboard)", + "step2_accordion": "Etapa 2: Os Keyframes (Especialista: Flux)", + "step2_description": "O Diretor de Arte (Gemini) guiará o Pintor (Flux) para criar as imagens-chave da sua história.", + "art_director_label": "Usar Diretor de Arte IA (para prompts de keyframe)", + "keyframes_button": "2. Gerar Imagens-Chave", + "keyframes_gallery_label": "Galeria de Cenas-Chave (Keyframes)", + "manual_keyframes_label": "Carregar Keyframes Manualmente", + "manual_separator": "--- OU ---", + "step3_accordion": "Etapa 3: A Produção do Filme (Especialistas: LTX & MMAudio)", + "step3_description": "O Diretor de Continuidade e o Cineasta irão guiar a Câmera (LTX) para filmar as transições entre os keyframes.", + "continuity_director_label": "Usar Diretor de Continuidade IA (para cortes)", + "cinematographer_label": "Usar Cineasta IA (para prompts de movimento)", + "duration_label": "Duração por Cena (s)", + "n_corte_label": "Ponto de Corte Base (%)", + "n_corte_info": "Percentual base da cena a ser substituído pela transição. Será ajustado dinamicamente.", + "convergence_chunks_label": "Máx. Chunks de Convergência", + "convergence_chunks_info": "Nº máx. 
de chunks latentes (memória) para guiar a convergência do movimento. Será ajustado dinamicamente.", + "path_convergence_label": "Força do Handler (Tensor)", + "destination_convergence_label": "Convergência do Destino (Tensor)", + "produce_button": "3. 🎬 Produzir Filme Completo (com Som)", + "advanced_accordion_label": "Configurações Avançadas (LTX)", + "guidance_label": "Guidance Scale", + "stg_label": "STG Scale", + "rescaling_label": "Rescaling Scale", + "steps_label": "Passos de Inferência", + "steps_info": "Mais passos podem melhorar a qualidade, mas aumentam o tempo. Ignorado para modelos 'distilled'.", + "video_fragments_gallery_label": "Fragmentos do Filme Gerados", + "final_movie_with_audio_label": "🎉 FILME COMPLETO 🎉" + }, + "en": { + "app_title": "ADUC-SDR 🎬 - The AI Film Director", + "app_subtitle": "Create a complete film with video and audio, orchestrated by a team of AIs.", + "lang_selector_label": "Language / Idioma", + "step1_accordion": "Step 1: Script & Key Scenes", + "prompt_label": "General Film Idea", + "ref_images_base_label": "Reference Images (Story Base)", + "ref_images_extra_label": "Additional Images (Scene Bank for Photographer Mode)", + "keyframes_label": "Number of Key-Scenes", + "storyboard_button": "1. Generate Script", + "storyboard_and_keyframes_button": "1A. Generate Script & Keyframes (Art Director Mode)", + "storyboard_from_photos_button": "1B. Generate Script from Photos (Photographer Mode)", + "step1_mode_b_info": "Photographer Mode: 'Additional Images' are used as a scene bank, and the AI will choose the best one for each script part.", + "storyboard_output_label": "Generated Script (Storyboard)", + "step2_accordion": "Step 2: The Keyframes (Specialist: Flux)", + "step2_description": "The Art Director (Gemini) will guide the Painter (Flux) to create the key images of your story.", + "art_director_label": "Use AI Art Director (for keyframe prompts)", + "keyframes_button": "2. Generate Key-Images", + "keyframes_gallery_label": "Key-Scenes Gallery (Keyframes)", + "manual_keyframes_label": "Upload Keyframes Manually", + "manual_separator": "--- OR ---", + "step3_accordion": "Step 3: Film Production (Specialists: LTX & MMAudio)", + "step3_description": "The Continuity Director and Cinematographer will guide the Camera (LTX) to shoot the transitions between keyframes.", + "continuity_director_label": "Use AI Continuity Director (for cuts)", + "cinematographer_label": "Use AI Cinematographer (for motion prompts)", + "duration_label": "Duration per Scene (s)", + "n_corte_label": "Base Cut Point (%)", + "n_corte_info": "Base percentage of the scene to be replaced by the transition. Will be adjusted dynamically.", + "convergence_chunks_label": "Max Convergence Chunks", + "convergence_chunks_info": "Max number of latent chunks (memory) to guide motion convergence. Will be adjusted dynamically.", + "path_convergence_label": "Handler Strength (Tensor)", + "destination_convergence_label": "Destination Convergence (Tensor)", + "produce_button": "3. 🎬 Produce Complete Film (with Sound)", + "advanced_accordion_label": "Advanced Settings (LTX)", + "guidance_label": "Guidance Scale", + "stg_label": "STG Scale", + "rescaling_label": "Rescaling Scale", + "steps_label": "Inference Steps", + "steps_info": "More steps can improve quality but increase generation time. 
Ignored for 'distilled' models.", + "video_fragments_gallery_label": "Generated Film Fragments", + "final_movie_with_audio_label": "🎉 COMPLETE MOVIE 🎉" + }, + "zh": { + "app_title": "ADUC-SDR 🎬 - 人工智能电影导演", + "app_subtitle": "由人工智能团队精心策划,根据一个想法和参考图像创作一部完整的有声电影。", + "lang_selector_label": "语言 / Language", + "step1_accordion": "第 1 步:剧本和关键场景", + "prompt_label": "电影总体构想", + "ref_images_base_label": "参考图像 (故事基础)", + "ref_images_extra_label": "附加图像 (摄影师模式的场景库)", + "keyframes_label": "关键场景数量", + "storyboard_button": "1. 生成剧本", + "storyboard_and_keyframes_button": "1A. 生成剧本和关键帧 (艺术总监模式)", + "storyboard_from_photos_button": "1B. 从照片生成剧本 (摄影师模式)", + "step1_mode_b_info": "摄影师模式:“附加图像”被用作场景库,AI将为剧本的每个部分选择最佳图像。", + "storyboard_output_label": "生成的剧本", + "step2_accordion": "第 2 步:关键帧 (专家: Flux)", + "step2_description": "艺术总监 (Gemini) 将指导画家 (Flux) 创作故事的关键图像。", + "art_director_label": "使用AI艺术总监", + "keyframes_button": "2. 生成关键图像", + "keyframes_gallery_label": "关键场景画廊 (关键帧)", + "manual_keyframes_label": "手动上传关键帧", + "manual_separator": "--- 或者 ---", + "step3_accordion": "第 3 步:影片制作 (专家: LTX & MMAudio)", + "step3_description": "连续性导演和电影摄影师将指导摄像机 (LTX) 拍摄关键帧之间的过渡。", + "continuity_director_label": "使用AI连续性导演", + "cinematographer_label": "使用AI电影摄影师", + "duration_label": "每场景时长 (秒)", + "n_corte_label": "基础剪辑点 (%)", + "n_corte_info": "将被过渡替换的场景基础百分比。将动态调整。", + "convergence_chunks_label": "最大收敛块", + "convergence_chunks_info": "用于引导运动收敛的最大潜在块(内存)数量。将动态调整。", + "path_convergence_label": "处理器强度 (张量)", + "destination_convergence_label": "目标收敛 (张量)", + "produce_button": "3. 🎬 制作完整影片 (有声)", + "advanced_accordion_label": "高级设置 (LTX)", + "guidance_label": "引导比例", + "stg_label": "STG 比例", + "rescaling_label": "重缩放比例", + "steps_label": "推理步骤", + "steps_info": "更多步骤可以提高质量,但会增加生成时间。对“distilled”模型无效。", + "video_fragments_gallery_label": "生成的电影片段", + "final_movie_with_audio_label": "🎉 完整影片 🎉" + } +} \ No newline at end of file diff --git a/image_specialist.py b/image_specialist.py new file mode 100644 index 0000000000000000000000000000000000000000..04d3113272ce4e5a8fbcdffce88d00690f88be7b --- /dev/null +++ b/image_specialist.py @@ -0,0 +1,98 @@ +# image_specialist.py +# Copyright (C) 2025 Carlos Rodrigues dos Santos +# +# Este programa é software livre: você pode redistribuí-lo e/ou modificá-lo +# sob os termos da Licença Pública Geral Affero GNU... +# AVISO DE PATENTE PENDENTE: Consulte NOTICE.md. + +from PIL import Image +import os +import time +import logging +import gradio as gr +import yaml + +from flux_kontext_helpers import flux_kontext_singleton +from gemini_helpers import gemini_singleton + +logger = logging.getLogger(__name__) + +class ImageSpecialist: + """ + Especialista ADUC para a geração de imagens estáticas (keyframes). + É responsável por todo o processo de transformar um roteiro em uma galeria de keyframes. + """ + def __init__(self, workspace_dir): + self.workspace_dir = workspace_dir + self.image_generation_helper = flux_kontext_singleton + logger.info("Especialista de Imagem (Flux) pronto para receber ordens do Maestro.") + + def _generate_single_keyframe(self, prompt: str, reference_images: list[Image.Image], output_filename: str, width: int, height: int, callback: callable = None) -> str: + """ + Função de baixo nível que gera uma única imagem. 
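+        Returns the path of the saved keyframe inside the workspace directory.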
+ """ + logger.info(f"Gerando keyframe '{output_filename}' com prompt: '{prompt}'") + generated_image = self.image_generation_helper.generate_image( + reference_images=reference_images, prompt=prompt, width=width, + height=height, seed=int(time.time()), callback=callback + ) + final_path = os.path.join(self.workspace_dir, output_filename) + generated_image.save(final_path) + logger.info(f"Keyframe salvo com sucesso em: {final_path}") + return final_path + + def generate_keyframes_from_storyboard(self, storyboard: list, initial_ref_path: str, global_prompt: str, keyframe_resolution: int, general_ref_paths: list, progress_callback_factory: callable = None): + """ + Orquestra a geração de todos os keyframes a partir de um storyboard. + """ + current_base_image_path = initial_ref_path + previous_prompt = "N/A (imagem inicial de referência)" + final_keyframes = [current_base_image_path] + width, height = keyframe_resolution, keyframe_resolution + + # O número de keyframes a gerar é len(storyboard) - 1, pois o primeiro keyframe já existe (initial_ref_path) + # E o storyboard tem o mesmo número de elementos que o número total de keyframes desejados. + num_keyframes_to_generate = len(storyboard) - 1 + + logger.info(f"ESPECIALISTA DE IMAGEM: Recebi ordem para gerar {num_keyframes_to_generate} keyframes.") + + for i in range(num_keyframes_to_generate): + # A cena atual é a transição de storyboard[i] para storyboard[i+1] + current_scene = storyboard[i] + future_scene = storyboard[i+1] + progress_callback = progress_callback_factory(i + 1, num_keyframes_to_generate) if progress_callback_factory else None + + logger.info(f"--> Gerando Keyframe {i+1}/{num_keyframes_to_generate}...") + + # O próprio especialista consulta o Gemini para o prompt de imagem + new_flux_prompt = gemini_singleton.get_anticipatory_keyframe_prompt( + global_prompt=global_prompt, scene_history=previous_prompt, + current_scene_desc=current_scene, future_scene_desc=future_scene, + last_image_path=current_base_image_path, fixed_ref_paths=general_ref_paths + ) + + images_for_flux_paths = list(set([current_base_image_path] + general_ref_paths)) + images_for_flux = [Image.open(p) for p in images_for_flux_paths] + + new_keyframe_path = self._generate_single_keyframe( + prompt=new_flux_prompt, reference_images=images_for_flux, + output_filename=f"keyframe_{i+1}.png", width=width, height=height, + callback=progress_callback + ) + + final_keyframes.append(new_keyframe_path) + current_base_image_path = new_keyframe_path + previous_prompt = new_flux_prompt + + logger.info(f"ESPECIALISTA DE IMAGEM: Geração de keyframes concluída.") + return final_keyframes + +# Singleton instantiation - usa o workspace_dir da config +try: + with open("config.yaml", 'r') as f: + config = yaml.safe_load(f) + WORKSPACE_DIR = config['application']['workspace_dir'] + image_specialist_singleton = ImageSpecialist(workspace_dir=WORKSPACE_DIR) +except Exception as e: + logger.error(f"Não foi possível inicializar o ImageSpecialist: {e}", exc_info=True) + image_specialist_singleton = None \ No newline at end of file diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..0be9213f5016d6559ca3c04e435388069235c88d --- /dev/null +++ b/inference.py @@ -0,0 +1,774 @@ +import argparse +import os +import random +from datetime import datetime +from pathlib import Path +from diffusers.utils import logging +from typing import Optional, List, Union +import yaml + +import imageio +import json +import numpy as np +import torch 
+import cv2 +from safetensors import safe_open +from PIL import Image +from transformers import ( + T5EncoderModel, + T5Tokenizer, + AutoModelForCausalLM, + AutoProcessor, + AutoTokenizer, +) +from huggingface_hub import hf_hub_download + +from ltx_video.models.autoencoders.causal_video_autoencoder import ( + CausalVideoAutoencoder, +) +from ltx_video.models.transformers.symmetric_patchifier import SymmetricPatchifier +from ltx_video.models.transformers.transformer3d import Transformer3DModel +from ltx_video.pipelines.pipeline_ltx_video import ( + ConditioningItem, + LTXVideoPipeline, + LTXMultiScalePipeline, +) +from ltx_video.schedulers.rf import RectifiedFlowScheduler +from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy +from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler +import ltx_video.pipelines.crf_compressor as crf_compressor + +MAX_HEIGHT = 720 +MAX_WIDTH = 1280 +MAX_NUM_FRAMES = 257 + +logger = logging.get_logger("LTX-Video") + + +def get_total_gpu_memory(): + if torch.cuda.is_available(): + total_memory = torch.cuda.get_device_properties(0).total_memory / (1024**3) + return total_memory + return 44 + + +def get_device(): + if torch.cuda.is_available(): + return "cuda" + elif torch.backends.mps.is_available(): + return "mps" + return "cuda" + + +def load_image_to_tensor_with_resize_and_crop( + image_input: Union[str, Image.Image], + target_height: int = 512, + target_width: int = 768, + just_crop: bool = False, +) -> torch.Tensor: + """Load and process an image into a tensor. + + Args: + image_input: Either a file path (str) or a PIL Image object + target_height: Desired height of output tensor + target_width: Desired width of output tensor + just_crop: If True, only crop the image to the target size without resizing + """ + if isinstance(image_input, str): + image = Image.open(image_input).convert("RGB") + elif isinstance(image_input, Image.Image): + image = image_input + else: + raise ValueError("image_input must be either a file path or a PIL Image object") + + input_width, input_height = image.size + aspect_ratio_target = target_width / target_height + aspect_ratio_frame = input_width / input_height + if aspect_ratio_frame > aspect_ratio_target: + new_width = int(input_height * aspect_ratio_target) + new_height = input_height + x_start = (input_width - new_width) // 2 + y_start = 0 + else: + new_width = input_width + new_height = int(input_width / aspect_ratio_target) + x_start = 0 + y_start = (input_height - new_height) // 2 + + image = image.crop((x_start, y_start, x_start + new_width, y_start + new_height)) + if not just_crop: + image = image.resize((target_width, target_height)) + + image = np.array(image) + image = cv2.GaussianBlur(image, (3, 3), 0) + frame_tensor = torch.from_numpy(image).float() + frame_tensor = crf_compressor.compress(frame_tensor / 255.0) * 255.0 + frame_tensor = frame_tensor.permute(2, 0, 1) + frame_tensor = (frame_tensor / 127.5) - 1.0 + # Create 5D tensor: (batch_size=1, channels=3, num_frames=1, height, width) + return frame_tensor.unsqueeze(0).unsqueeze(2) + + +def calculate_padding( + source_height: int, source_width: int, target_height: int, target_width: int +) -> tuple[int, int, int, int]: + + # Calculate total padding needed + pad_height = target_height - source_height + pad_width = target_width - source_width + + # Calculate padding for each side + pad_top = pad_height // 2 + pad_bottom = pad_height - pad_top # Handles odd padding + pad_left = pad_width // 2 + pad_right = pad_width - pad_left # Handles 
odd padding + + # Return padded tensor + # Padding format is (left, right, top, bottom) + padding = (pad_left, pad_right, pad_top, pad_bottom) + return padding + + +def convert_prompt_to_filename(text: str, max_len: int = 20) -> str: + # Remove non-letters and convert to lowercase + clean_text = "".join( + char.lower() for char in text if char.isalpha() or char.isspace() + ) + + # Split into words + words = clean_text.split() + + # Build result string keeping track of length + result = [] + current_length = 0 + + for word in words: + # Add word length plus 1 for underscore (except for first word) + new_length = current_length + len(word) + + if new_length <= max_len: + result.append(word) + current_length += len(word) + else: + break + + return "-".join(result) + + +# Generate output video name +def get_unique_filename( + base: str, + ext: str, + prompt: str, + seed: int, + resolution: tuple[int, int, int], + dir: Path, + endswith=None, + index_range=1000, +) -> Path: + base_filename = f"{base}_{convert_prompt_to_filename(prompt, max_len=30)}_{seed}_{resolution[0]}x{resolution[1]}x{resolution[2]}" + for i in range(index_range): + filename = dir / f"{base_filename}_{i}{endswith if endswith else ''}{ext}" + if not os.path.exists(filename): + return filename + raise FileExistsError( + f"Could not find a unique filename after {index_range} attempts." + ) + + +def seed_everething(seed: int): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(seed) + if torch.backends.mps.is_available(): + torch.mps.manual_seed(seed) + + +def main(): + parser = argparse.ArgumentParser( + description="Load models from separate directories and run the pipeline." + ) + + # Directories + parser.add_argument( + "--output_path", + type=str, + default=None, + help="Path to the folder to save output video, if None will save in outputs/ directory.", + ) + parser.add_argument("--seed", type=int, default="171198") + + # Pipeline parameters + parser.add_argument( + "--num_images_per_prompt", + type=int, + default=1, + help="Number of images per prompt", + ) + parser.add_argument( + "--image_cond_noise_scale", + type=float, + default=0.15, + help="Amount of noise to add to the conditioned image", + ) + parser.add_argument( + "--height", + type=int, + default=704, + help="Height of the output video frames. Optional if an input image provided.", + ) + parser.add_argument( + "--width", + type=int, + default=1216, + help="Width of the output video frames. If None will infer from input image.", + ) + parser.add_argument( + "--num_frames", + type=int, + default=121, + help="Number of frames to generate in the output video", + ) + parser.add_argument( + "--frame_rate", type=int, default=30, help="Frame rate for the output video" + ) + parser.add_argument( + "--device", + default=None, + help="Device to run inference on. 
If not specified, will automatically detect and use CUDA or MPS if available, else CPU.", + ) + parser.add_argument( + "--pipeline_config", + type=str, + default="configs/ltxv-13b-0.9.7-dev.yaml", + help="The path to the config file for the pipeline, which contains the parameters for the pipeline", + ) + + # Prompts + parser.add_argument( + "--prompt", + type=str, + help="Text prompt to guide generation", + ) + parser.add_argument( + "--negative_prompt", + type=str, + default="worst quality, inconsistent motion, blurry, jittery, distorted", + help="Negative prompt for undesired features", + ) + + parser.add_argument( + "--offload_to_cpu", + action="store_true", + help="Offloading unnecessary computations to CPU.", + ) + + # video-to-video arguments: + parser.add_argument( + "--input_media_path", + type=str, + default=None, + help="Path to the input video (or imaage) to be modified using the video-to-video pipeline", + ) + + # Conditioning arguments + parser.add_argument( + "--conditioning_media_paths", + type=str, + nargs="*", + help="List of paths to conditioning media (images or videos). Each path will be used as a conditioning item.", + ) + parser.add_argument( + "--conditioning_strengths", + type=float, + nargs="*", + help="List of conditioning strengths (between 0 and 1) for each conditioning item. Must match the number of conditioning items.", + ) + parser.add_argument( + "--conditioning_start_frames", + type=int, + nargs="*", + help="List of frame indices where each conditioning item should be applied. Must match the number of conditioning items.", + ) + + args = parser.parse_args() + logger.warning(f"Running generation with arguments: {args}") + infer(**vars(args)) + + +def create_ltx_video_pipeline( + ckpt_path: str, + precision: str, + text_encoder_model_name_or_path: str, + sampler: Optional[str] = None, + device: Optional[str] = None, + enhance_prompt: bool = False, + prompt_enhancer_image_caption_model_name_or_path: Optional[str] = None, + prompt_enhancer_llm_model_name_or_path: Optional[str] = None, +) -> LTXVideoPipeline: + ckpt_path = Path(ckpt_path) + assert os.path.exists( + ckpt_path + ), f"Ckpt path provided (--ckpt_path) {ckpt_path} does not exist" + + with safe_open(ckpt_path, framework="pt") as f: + metadata = f.metadata() + config_str = metadata.get("config") + configs = json.loads(config_str) + allowed_inference_steps = configs.get("allowed_inference_steps", None) + + vae = CausalVideoAutoencoder.from_pretrained(ckpt_path) + transformer = Transformer3DModel.from_pretrained(ckpt_path) + + # Use constructor if sampler is specified, otherwise use from_pretrained + if sampler == "from_checkpoint" or not sampler: + scheduler = RectifiedFlowScheduler.from_pretrained(ckpt_path) + else: + scheduler = RectifiedFlowScheduler( + sampler=("Uniform" if sampler.lower() == "uniform" else "LinearQuadratic") + ) + + text_encoder = T5EncoderModel.from_pretrained( + text_encoder_model_name_or_path, subfolder="text_encoder" + ) + patchifier = SymmetricPatchifier(patch_size=1) + tokenizer = T5Tokenizer.from_pretrained( + text_encoder_model_name_or_path, subfolder="tokenizer" + ) + + transformer = transformer.to(device) + vae = vae.to(device) + text_encoder = text_encoder.to(device) + + if enhance_prompt: + prompt_enhancer_image_caption_model = AutoModelForCausalLM.from_pretrained( + prompt_enhancer_image_caption_model_name_or_path, trust_remote_code=True + ) + prompt_enhancer_image_caption_processor = AutoProcessor.from_pretrained( + prompt_enhancer_image_caption_model_name_or_path, 
trust_remote_code=True + ) + prompt_enhancer_llm_model = AutoModelForCausalLM.from_pretrained( + prompt_enhancer_llm_model_name_or_path, + torch_dtype="bfloat16", + ) + prompt_enhancer_llm_tokenizer = AutoTokenizer.from_pretrained( + prompt_enhancer_llm_model_name_or_path, + ) + else: + prompt_enhancer_image_caption_model = None + prompt_enhancer_image_caption_processor = None + prompt_enhancer_llm_model = None + prompt_enhancer_llm_tokenizer = None + + vae = vae.to(torch.bfloat16) + if precision == "bfloat16" and transformer.dtype != torch.bfloat16: + transformer = transformer.to(torch.bfloat16) + text_encoder = text_encoder.to(torch.bfloat16) + + # Use submodels for the pipeline + submodel_dict = { + "transformer": transformer, + "patchifier": patchifier, + "text_encoder": text_encoder, + "tokenizer": tokenizer, + "scheduler": scheduler, + "vae": vae, + "prompt_enhancer_image_caption_model": prompt_enhancer_image_caption_model, + "prompt_enhancer_image_caption_processor": prompt_enhancer_image_caption_processor, + "prompt_enhancer_llm_model": prompt_enhancer_llm_model, + "prompt_enhancer_llm_tokenizer": prompt_enhancer_llm_tokenizer, + "allowed_inference_steps": allowed_inference_steps, + } + + pipeline = LTXVideoPipeline(**submodel_dict) + pipeline = pipeline.to(device) + return pipeline + + +def create_latent_upsampler(latent_upsampler_model_path: str, device: str): + latent_upsampler = LatentUpsampler.from_pretrained(latent_upsampler_model_path) + latent_upsampler.to(device) + latent_upsampler.eval() + return latent_upsampler + + +def infer( + output_path: Optional[str], + seed: int, + pipeline_config: str, + image_cond_noise_scale: float, + height: Optional[int], + width: Optional[int], + num_frames: int, + frame_rate: int, + prompt: str, + negative_prompt: str, + offload_to_cpu: bool, + input_media_path: Optional[str] = None, + conditioning_media_paths: Optional[List[str]] = None, + conditioning_strengths: Optional[List[float]] = None, + conditioning_start_frames: Optional[List[int]] = None, + device: Optional[str] = None, + **kwargs, +): + # check if pipeline_config is a file + if not os.path.isfile(pipeline_config): + raise ValueError(f"Pipeline config file {pipeline_config} does not exist") + with open(pipeline_config, "r") as f: + pipeline_config = yaml.safe_load(f) + + models_dir = "MODEL_DIR" + + ltxv_model_name_or_path = pipeline_config["checkpoint_path"] + if not os.path.isfile(ltxv_model_name_or_path): + ltxv_model_path = hf_hub_download( + repo_id="Lightricks/LTX-Video", + filename=ltxv_model_name_or_path, + local_dir=models_dir, + repo_type="model", + ) + else: + ltxv_model_path = ltxv_model_name_or_path + + spatial_upscaler_model_name_or_path = pipeline_config.get( + "spatial_upscaler_model_path" + ) + if spatial_upscaler_model_name_or_path and not os.path.isfile( + spatial_upscaler_model_name_or_path + ): + spatial_upscaler_model_path = hf_hub_download( + repo_id="Lightricks/LTX-Video", + filename=spatial_upscaler_model_name_or_path, + local_dir=models_dir, + repo_type="model", + ) + else: + spatial_upscaler_model_path = spatial_upscaler_model_name_or_path + + if kwargs.get("input_image_path", None): + logger.warning( + "Please use conditioning_media_paths instead of input_image_path." 
+ ) + assert not conditioning_media_paths and not conditioning_start_frames + conditioning_media_paths = [kwargs["input_image_path"]] + conditioning_start_frames = [0] + + # Validate conditioning arguments + if conditioning_media_paths: + # Use default strengths of 1.0 + if not conditioning_strengths: + conditioning_strengths = [1.0] * len(conditioning_media_paths) + if not conditioning_start_frames: + raise ValueError( + "If `conditioning_media_paths` is provided, " + "`conditioning_start_frames` must also be provided" + ) + if len(conditioning_media_paths) != len(conditioning_strengths) or len( + conditioning_media_paths + ) != len(conditioning_start_frames): + raise ValueError( + "`conditioning_media_paths`, `conditioning_strengths`, " + "and `conditioning_start_frames` must have the same length" + ) + if any(s < 0 or s > 1 for s in conditioning_strengths): + raise ValueError("All conditioning strengths must be between 0 and 1") + if any(f < 0 or f >= num_frames for f in conditioning_start_frames): + raise ValueError( + f"All conditioning start frames must be between 0 and {num_frames-1}" + ) + + seed_everething(seed) + if offload_to_cpu and not torch.cuda.is_available(): + logger.warning( + "offload_to_cpu is set to True, but offloading will not occur since the model is already running on CPU." + ) + offload_to_cpu = False + else: + offload_to_cpu = offload_to_cpu and get_total_gpu_memory() < 30 + + output_dir = ( + Path(output_path) + if output_path + else Path(f"outputs/{datetime.today().strftime('%Y-%m-%d')}") + ) + output_dir.mkdir(parents=True, exist_ok=True) + + # Adjust dimensions to be divisible by 32 and num_frames to be (N * 8 + 1) + height_padded = ((height - 1) // 32 + 1) * 32 + width_padded = ((width - 1) // 32 + 1) * 32 + num_frames_padded = ((num_frames - 2) // 8 + 1) * 8 + 1 + + padding = calculate_padding(height, width, height_padded, width_padded) + + logger.warning( + f"Padded dimensions: {height_padded}x{width_padded}x{num_frames_padded}" + ) + + prompt_enhancement_words_threshold = pipeline_config[ + "prompt_enhancement_words_threshold" + ] + + prompt_word_count = len(prompt.split()) + enhance_prompt = ( + prompt_enhancement_words_threshold > 0 + and prompt_word_count < prompt_enhancement_words_threshold + ) + + if prompt_enhancement_words_threshold > 0 and not enhance_prompt: + logger.info( + f"Prompt has {prompt_word_count} words, which exceeds the threshold of {prompt_enhancement_words_threshold}. Prompt enhancement disabled." 
+ ) + + precision = pipeline_config["precision"] + text_encoder_model_name_or_path = pipeline_config["text_encoder_model_name_or_path"] + sampler = pipeline_config["sampler"] + prompt_enhancer_image_caption_model_name_or_path = pipeline_config[ + "prompt_enhancer_image_caption_model_name_or_path" + ] + prompt_enhancer_llm_model_name_or_path = pipeline_config[ + "prompt_enhancer_llm_model_name_or_path" + ] + + pipeline = create_ltx_video_pipeline( + ckpt_path=ltxv_model_path, + precision=precision, + text_encoder_model_name_or_path=text_encoder_model_name_or_path, + sampler=sampler, + device=kwargs.get("device", get_device()), + enhance_prompt=enhance_prompt, + prompt_enhancer_image_caption_model_name_or_path=prompt_enhancer_image_caption_model_name_or_path, + prompt_enhancer_llm_model_name_or_path=prompt_enhancer_llm_model_name_or_path, + ) + + if pipeline_config.get("pipeline_type", None) == "multi-scale": + if not spatial_upscaler_model_path: + raise ValueError( + "spatial upscaler model path is missing from pipeline config file and is required for multi-scale rendering" + ) + latent_upsampler = create_latent_upsampler( + spatial_upscaler_model_path, pipeline.device + ) + pipeline = LTXMultiScalePipeline(pipeline, latent_upsampler=latent_upsampler) + + media_item = None + if input_media_path: + media_item = load_media_file( + media_path=input_media_path, + height=height, + width=width, + max_frames=num_frames_padded, + padding=padding, + ) + + conditioning_items = ( + prepare_conditioning( + conditioning_media_paths=conditioning_media_paths, + conditioning_strengths=conditioning_strengths, + conditioning_start_frames=conditioning_start_frames, + height=height, + width=width, + num_frames=num_frames, + padding=padding, + pipeline=pipeline, + ) + if conditioning_media_paths + else None + ) + + stg_mode = pipeline_config.get("stg_mode", "attention_values") + del pipeline_config["stg_mode"] + if stg_mode.lower() == "stg_av" or stg_mode.lower() == "attention_values": + skip_layer_strategy = SkipLayerStrategy.AttentionValues + elif stg_mode.lower() == "stg_as" or stg_mode.lower() == "attention_skip": + skip_layer_strategy = SkipLayerStrategy.AttentionSkip + elif stg_mode.lower() == "stg_r" or stg_mode.lower() == "residual": + skip_layer_strategy = SkipLayerStrategy.Residual + elif stg_mode.lower() == "stg_t" or stg_mode.lower() == "transformer_block": + skip_layer_strategy = SkipLayerStrategy.TransformerBlock + else: + raise ValueError(f"Invalid spatiotemporal guidance mode: {stg_mode}") + + # Prepare input for the pipeline + sample = { + "prompt": prompt, + "prompt_attention_mask": None, + "negative_prompt": negative_prompt, + "negative_prompt_attention_mask": None, + } + + device = device or get_device() + generator = torch.Generator(device=device).manual_seed(seed) + + images = pipeline( + **pipeline_config, + skip_layer_strategy=skip_layer_strategy, + generator=generator, + output_type="pt", + callback_on_step_end=None, + height=height_padded, + width=width_padded, + num_frames=num_frames_padded, + frame_rate=frame_rate, + **sample, + media_items=media_item, + conditioning_items=conditioning_items, + is_video=True, + vae_per_channel_normalize=True, + image_cond_noise_scale=image_cond_noise_scale, + mixed_precision=(precision == "mixed_precision"), + offload_to_cpu=offload_to_cpu, + device=device, + enhance_prompt=enhance_prompt, + ).images + + # Crop the padded images to the desired resolution and number of frames + (pad_left, pad_right, pad_top, pad_bottom) = padding + pad_bottom = 
-pad_bottom + pad_right = -pad_right + if pad_bottom == 0: + pad_bottom = images.shape[3] + if pad_right == 0: + pad_right = images.shape[4] + images = images[:, :, :num_frames, pad_top:pad_bottom, pad_left:pad_right] + + for i in range(images.shape[0]): + # Gathering from B, C, F, H, W to C, F, H, W and then permuting to F, H, W, C + video_np = images[i].permute(1, 2, 3, 0).cpu().float().numpy() + # Unnormalizing images to [0, 255] range + video_np = (video_np * 255).astype(np.uint8) + fps = frame_rate + height, width = video_np.shape[1:3] + # In case a single image is generated + if video_np.shape[0] == 1: + output_filename = get_unique_filename( + f"image_output_{i}", + ".png", + prompt=prompt, + seed=seed, + resolution=(height, width, num_frames), + dir=output_dir, + ) + imageio.imwrite(output_filename, video_np[0]) + else: + output_filename = get_unique_filename( + f"video_output_{i}", + ".mp4", + prompt=prompt, + seed=seed, + resolution=(height, width, num_frames), + dir=output_dir, + ) + + # Write video + with imageio.get_writer(output_filename, fps=fps) as video: + for frame in video_np: + video.append_data(frame) + + logger.warning(f"Output saved to {output_filename}") + + +def prepare_conditioning( + conditioning_media_paths: List[str], + conditioning_strengths: List[float], + conditioning_start_frames: List[int], + height: int, + width: int, + num_frames: int, + padding: tuple[int, int, int, int], + pipeline: LTXVideoPipeline, +) -> Optional[List[ConditioningItem]]: + """Prepare conditioning items based on input media paths and their parameters. + + Args: + conditioning_media_paths: List of paths to conditioning media (images or videos) + conditioning_strengths: List of conditioning strengths for each media item + conditioning_start_frames: List of frame indices where each item should be applied + height: Height of the output frames + width: Width of the output frames + num_frames: Number of frames in the output video + padding: Padding to apply to the frames + pipeline: LTXVideoPipeline object used for condition video trimming + + Returns: + A list of ConditioningItem objects. + """ + conditioning_items = [] + for path, strength, start_frame in zip( + conditioning_media_paths, conditioning_strengths, conditioning_start_frames + ): + num_input_frames = orig_num_input_frames = get_media_num_frames(path) + if hasattr(pipeline, "trim_conditioning_sequence") and callable( + getattr(pipeline, "trim_conditioning_sequence") + ): + num_input_frames = pipeline.trim_conditioning_sequence( + start_frame, orig_num_input_frames, num_frames + ) + if num_input_frames < orig_num_input_frames: + logger.warning( + f"Trimming conditioning video {path} from {orig_num_input_frames} to {num_input_frames} frames." 
+ ) + + media_tensor = load_media_file( + media_path=path, + height=height, + width=width, + max_frames=num_input_frames, + padding=padding, + just_crop=True, + ) + conditioning_items.append(ConditioningItem(media_tensor, start_frame, strength)) + return conditioning_items + + +def get_media_num_frames(media_path: str) -> int: + is_video = any( + media_path.lower().endswith(ext) for ext in [".mp4", ".avi", ".mov", ".mkv"] + ) + num_frames = 1 + if is_video: + reader = imageio.get_reader(media_path) + num_frames = reader.count_frames() + reader.close() + return num_frames + + +def load_media_file( + media_path: str, + height: int, + width: int, + max_frames: int, + padding: tuple[int, int, int, int], + just_crop: bool = False, +) -> torch.Tensor: + is_video = any( + media_path.lower().endswith(ext) for ext in [".mp4", ".avi", ".mov", ".mkv"] + ) + if is_video: + reader = imageio.get_reader(media_path) + num_input_frames = min(reader.count_frames(), max_frames) + + # Read and preprocess the relevant frames from the video file. + frames = [] + for i in range(num_input_frames): + frame = Image.fromarray(reader.get_data(i)) + frame_tensor = load_image_to_tensor_with_resize_and_crop( + frame, height, width, just_crop=just_crop + ) + frame_tensor = torch.nn.functional.pad(frame_tensor, padding) + frames.append(frame_tensor) + reader.close() + + # Stack frames along the temporal dimension + media_tensor = torch.cat(frames, dim=2) + else: # Input image + media_tensor = load_image_to_tensor_with_resize_and_crop( + media_path, height, width, just_crop=just_crop + ) + media_tensor = torch.nn.functional.pad(media_tensor, padding) + return media_tensor + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/ltx_manager_helpers.py b/ltx_manager_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..bbd08fd2da90174784d05e24304ad1a29536e517 --- /dev/null +++ b/ltx_manager_helpers.py @@ -0,0 +1,198 @@ +# ltx_manager_helpers.py +# Copyright (C) 4 de Agosto de 2025 Carlos Rodrigues dos Santos +# +# ORIGINAL SOURCE: LTX-Video by Lightricks Ltd. & other open-source projects. +# Licensed under the Apache License, Version 2.0 +# https://github.com/Lightricks/LTX-Video +# +# MODIFICATIONS FOR ADUC-SDR_Video: +# This file is part of ADUC-SDR_Video, a derivative work based on LTX-Video. +# It has been modified to manage pools of LTX workers, handle GPU memory, +# and prepare parameters for the ADUC-SDR orchestration framework. +# All modifications are also licensed under the Apache License, Version 2.0. 
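+#
+# Overview (editorial note): LtxPoolManager keeps one LtxWorker per allocated GPU and
+# rotates between them round-robin; while the active worker renders a latent fragment,
+# the previously used worker is offloaded back to CPU on a background thread, and the
+# active worker itself is offloaded again in the finally block once its fragment is done.
+#
+# Minimal usage sketch (hypothetical values; real call sites pass many more kwargs):
+#
+#   latents, padding = ltx_manager_singleton.generate_latent_fragment(
+#       motion_prompt="slow dolly-in on the subject",
+#       height=512, width=768,
+#       video_total_frames=97, video_fps=24,
+#       current_fragment_index=0,
+#       conditioning_items_data=[],
+#   )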
+ +import torch +import gc +import os +import yaml +import logging +import huggingface_hub +import time +import threading +import json + +from optimization import optimize_ltx_worker, can_optimize_fp8 +from hardware_manager import hardware_manager +from inference import create_ltx_video_pipeline, calculate_padding +from ltx_video.pipelines.pipeline_ltx_video import LatentConditioningItem +from ltx_video.models.autoencoders.vae_encode import vae_decode + +logger = logging.getLogger(__name__) + +class LtxWorker: + def __init__(self, device_id, ltx_config_file): + self.cpu_device = torch.device('cpu') + self.device = torch.device(device_id if torch.cuda.is_available() else 'cpu') + logger.info(f"LTX Worker ({self.device}): Inicializando com config '{ltx_config_file}'...") + + with open(ltx_config_file, "r") as file: + self.config = yaml.safe_load(file) + + self.is_distilled = "distilled" in self.config.get("checkpoint_path", "") + + models_dir = "downloaded_models_gradio" + + logger.info(f"LTX Worker ({self.device}): Carregando modelo para a CPU...") + model_path = os.path.join(models_dir, self.config["checkpoint_path"]) + if not os.path.exists(model_path): + model_path = huggingface_hub.hf_hub_download( + repo_id="Lightricks/LTX-Video", filename=self.config["checkpoint_path"], + local_dir=models_dir, local_dir_use_symlinks=False + ) + + self.pipeline = create_ltx_video_pipeline( + ckpt_path=model_path, precision=self.config["precision"], + text_encoder_model_name_or_path=self.config["text_encoder_model_name_or_path"], + sampler=self.config["sampler"], device='cpu' + ) + logger.info(f"LTX Worker ({self.device}): Modelo pronto na CPU. É um modelo destilado? {self.is_distilled}") + + if self.device.type == 'cuda' and can_optimize_fp8(): + logger.info(f"LTX Worker ({self.device}): GPU com suporte a FP8 detectada. Iniciando otimização...") + self.pipeline.to(self.device) + optimize_ltx_worker(self) + self.pipeline.to(self.cpu_device) + logger.info(f"LTX Worker ({self.device}): Otimização concluída. Modelo pronto.") + elif self.device.type == 'cuda': + logger.info(f"LTX Worker ({self.device}): Otimização FP8 não suportada ou desativada. 
Usando modelo padrão.") + + def to_gpu(self): + if self.device.type == 'cpu': return + logger.info(f"LTX Worker: Movendo pipeline para a GPU {self.device}...") + self.pipeline.to(self.device) + + def to_cpu(self): + if self.device.type == 'cpu': return + logger.info(f"LTX Worker: Descarregando pipeline da GPU {self.device}...") + self.pipeline.to('cpu') + gc.collect() + if torch.cuda.is_available(): torch.cuda.empty_cache() + + def generate_video_fragment_internal(self, **kwargs): + return self.pipeline(**kwargs).images + +class LtxPoolManager: + def __init__(self, device_ids, ltx_config_file): + logger.info(f"LTX POOL MANAGER: Criando workers para os dispositivos: {device_ids}") + self.workers = [LtxWorker(dev_id, ltx_config_file) for dev_id in device_ids] + self.current_worker_index = 0 + self.lock = threading.Lock() + self.last_cleanup_thread = None + + def _cleanup_worker_thread(self, worker): + logger.info(f"LTX CLEANUP THREAD: Iniciando limpeza de {worker.device} em background...") + worker.to_cpu() + + def _prepare_and_log_params(self, worker_to_use, **kwargs): + target_device = worker_to_use.device + height, width = kwargs['height'], kwargs['width'] + + conditioning_data = kwargs.get('conditioning_items_data', []) + final_conditioning_items = [] + + # --- LOG ADICIONADO: Detalhes dos tensores de condicionamento --- + conditioning_log_details = [] + for i, item in enumerate(conditioning_data): + if hasattr(item, 'latent_tensor'): + item.latent_tensor = item.latent_tensor.to(target_device) + final_conditioning_items.append(item) + conditioning_log_details.append( + f" - Item {i}: frame={item.media_frame_number}, strength={item.conditioning_strength:.2f}, shape={list(item.latent_tensor.shape)}" + ) + + first_pass_config = worker_to_use.config.get("first_pass", {}) + padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32 + padding_vals = calculate_padding(height, width, padded_h, padded_w) + + pipeline_params = { + "height": padded_h, "width": padded_w, + "num_frames": kwargs['video_total_frames'], "frame_rate": kwargs['video_fps'], + "generator": torch.Generator(device=target_device).manual_seed(int(kwargs.get('seed', time.time())) + kwargs['current_fragment_index']), + "conditioning_items": final_conditioning_items, + "is_video": True, "vae_per_channel_normalize": True, + "decode_timestep": float(kwargs.get('decode_timestep', worker_to_use.config.get("decode_timestep", 0.05))), + "decode_noise_scale": float(kwargs.get('decode_noise_scale', worker_to_use.config.get("decode_noise_scale", 0.025))), + "image_cond_noise_scale": float(kwargs.get('image_cond_noise_scale', 0.0)), + "stochastic_sampling": bool(kwargs.get('stochastic_sampling', worker_to_use.config.get("stochastic_sampling", False))), + "prompt": kwargs['motion_prompt'], + "negative_prompt": kwargs.get('negative_prompt', "blurry, distorted, static, bad quality, artifacts"), + "guidance_scale": float(kwargs.get('guidance_scale', 1.0)), + "stg_scale": float(kwargs.get('stg_scale', 0.0)), + "rescaling_scale": float(kwargs.get('rescaling_scale', 1.0)), + } + + if worker_to_use.is_distilled: + pipeline_params["timesteps"] = first_pass_config.get("timesteps") + pipeline_params["num_inference_steps"] = len(pipeline_params["timesteps"]) if "timesteps" in first_pass_config else 8 + else: + pipeline_params["num_inference_steps"] = int(kwargs.get('num_inference_steps', 7)) + + # --- LOG ADICIONADO: Exibição completa dos parâmetros da pipeline --- + log_friendly_params = pipeline_params.copy() + 
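+        # Remove entries that json.dumps cannot serialize (the torch.Generator) or that would
+        # flood the log (latent conditioning tensors) before printing the pipeline parameters.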
log_friendly_params.pop('generator', None) + log_friendly_params.pop('conditioning_items', None) + + logger.info("="*60) + logger.info(f"CHAMADA AO PIPELINE LTX NO DISPOSITIVO: {worker_to_use.device}") + logger.info(f"Modelo: {'Distilled' if worker_to_use.is_distilled else 'Base'}") + logger.info("-" * 20 + " PARÂMETROS DA PIPELINE " + "-" * 20) + logger.info(json.dumps(log_friendly_params, indent=2)) + logger.info("-" * 20 + " ITENS DE CONDICIONAMENTO " + "-" * 19) + logger.info("\n".join(conditioning_log_details)) + logger.info("="*60) + # --- FIM DO LOG ADICIONADO --- + + return pipeline_params, padding_vals + + def generate_latent_fragment(self, **kwargs) -> (torch.Tensor, tuple): + worker_to_use = None + progress = kwargs.get('progress') + try: + with self.lock: + if self.last_cleanup_thread and self.last_cleanup_thread.is_alive(): + self.last_cleanup_thread.join() + worker_to_use = self.workers[self.current_worker_index] + previous_worker_index = (self.current_worker_index - 1 + len(self.workers)) % len(self.workers) + worker_to_cleanup = self.workers[previous_worker_index] + cleanup_thread = threading.Thread(target=self._cleanup_worker_thread, args=(worker_to_cleanup,)) + cleanup_thread.start() + self.last_cleanup_thread = cleanup_thread + worker_to_use.to_gpu() + self.current_worker_index = (self.current_worker_index + 1) % len(self.workers) + + pipeline_params, padding_vals = self._prepare_and_log_params(worker_to_use, **kwargs) + pipeline_params['output_type'] = "latent" + + if progress: progress(0.1, desc=f"[Especialista LTX em {worker_to_use.device}] Gerando latentes...") + + with torch.no_grad(): + result_tensor = worker_to_use.generate_video_fragment_internal(**pipeline_params) + + return result_tensor, padding_vals + except Exception as e: + logger.error(f"LTX POOL MANAGER: Erro durante a geração de latentes: {e}", exc_info=True) + raise e + finally: + if worker_to_use: + logger.info(f"LTX POOL MANAGER: Executando limpeza final para {worker_to_use.device}...") + worker_to_use.to_cpu() + + +logger.info("Lendo config.yaml para inicializar o LTX Pool Manager...") +with open("config.yaml", 'r') as f: + config = yaml.safe_load(f) +ltx_gpus_required = config['specialists']['ltx']['gpus_required'] +ltx_device_ids = hardware_manager.allocate_gpus('LTX', ltx_gpus_required) +ltx_config_path = config['specialists']['ltx']['config_file'] +ltx_manager_singleton = LtxPoolManager(device_ids=ltx_device_ids, ltx_config_file=ltx_config_path) +logger.info("Especialista de Vídeo (LTX) pronto.") \ No newline at end of file diff --git a/ltx_video/LICENSE.txt b/ltx_video/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/ltx_video/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
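Before the vendored `ltx_video` package files that follow, it helps to make the GPU-rotation pattern used by `LtxPoolManager.generate_latent_fragment` above explicit. The sketch below is a minimal, self-contained illustration of that round-robin idea: under a lock, wait for the previous cleanup thread, free the previously used device in the background, warm up the selected worker, then advance the index. `PoolWorker` and `WorkerPool` are hypothetical stand-ins, not the project's actual classes; real workers would move model weights between CPU and GPU instead of printing.

```python
# Minimal sketch of the round-robin worker rotation used by LtxPoolManager above.
# PoolWorker and WorkerPool are hypothetical stand-ins, not the project's classes.
import threading


class PoolWorker:
    def __init__(self, device: str):
        self.device = device

    def to_gpu(self):
        print(f"{self.device}: weights -> GPU")

    def to_cpu(self):
        print(f"{self.device}: weights -> CPU")

    def generate(self, **params) -> str:
        return f"latents generated on {self.device}"


class WorkerPool:
    def __init__(self, devices):
        self.workers = [PoolWorker(d) for d in devices]
        self.current = 0
        self.lock = threading.Lock()
        self.last_cleanup = None

    def generate(self, **params) -> str:
        with self.lock:
            # Wait for any still-running cleanup before touching the devices.
            if self.last_cleanup and self.last_cleanup.is_alive():
                self.last_cleanup.join()
            worker = self.workers[self.current]
            previous = self.workers[(self.current - 1) % len(self.workers)]
            # Free the previously used device in the background while the
            # selected worker is being prepared.
            self.last_cleanup = threading.Thread(target=previous.to_cpu)
            self.last_cleanup.start()
            worker.to_gpu()
            self.current = (self.current + 1) % len(self.workers)
        try:
            return worker.generate(**params)
        finally:
            # Mirror the `finally` block in the real manager: always release the GPU.
            worker.to_cpu()


if __name__ == "__main__":
    pool = WorkerPool(["cuda:0", "cuda:1"])
    for _ in range(3):
        print(pool.generate(prompt="example"))
```

The point of the background cleanup is to overlap freeing one GPU with preparing the next, so back-to-back fragment requests alternate devices without blocking on a full unload.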
diff --git a/ltx_video/README.md b/ltx_video/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..964c76ea7d615f5287e9283e23df316dad8bfdd4
--- /dev/null
+++ b/ltx_video/README.md
@@ -0,0 +1,135 @@
+# 🛠️ helpers/ - Third-Party AI Tools Adapted for ADUC-SDR
+
+This folder contains adapted implementations of third-party AI models and utilities that serve as low-level "specialists" or "tools" for the ADUC-SDR architecture.
+
+**IMPORTANT:** The content of this folder was authored by its respective original creators and developers. This folder is **NOT PART** of the main ADUC-SDR project in terms of its innovative architecture. It serves as a repository for the **direct, modified dependencies** that the `DeformesXDEngines` (the stages of the ADUC-SDR "rocket") invoke to perform specific tasks (image, video, and audio generation).
+
+The modifications made to the files in this folder aim primarily at:
+1. **Interface Adaptation:** Standardizing the interfaces so they fit into the ADUC-SDR orchestration flow.
+2. **Resource Management:** Integrating model loading/unloading logic (GPU management) and configuration via YAML files.
+3. **Flow Optimization:** Adjusting the pipelines to accept more efficient input formats (e.g., pre-encoded tensors instead of media paths, skipping redundant encode/decode steps).
+
+---
+
+## 📄 Licensing
+
+The original content of the projects listed below is licensed under the **Apache 2.0 License**, or another license specified by the original authors. All modifications and the use of these files within the `helpers/` structure of the ADUC-SDR project comply with the terms of the **Apache 2.0 License**.
+
+The original licenses of the projects can be found at their respective sources or in the `incl_licenses/` subdirectories inside each adapted module.
+
+---
+
+## 🛠️ Helpers API and Usage Guide
+
+This section details how each helper (specialist agent) should be used within the ADUC-SDR ecosystem. All agents are instantiated as **singletons** in `hardware_manager.py` to guarantee centralized management of GPU resources. A combined usage sketch follows the attribution section at the end of this document.
+
+### **gemini_helpers.py (GeminiAgent)**
+
+* **Purpose:** Acts as the "Adaptive Synthesis Oracle", responsible for all natural language processing tasks, such as storyboard creation, prompt generation, and narrative decision-making.
+* **Singleton Instance:** `gemini_agent_singleton`
+* **Constructor:** `GeminiAgent()`
+    * Reads `configs/gemini_config.yaml` for the model name, inference parameters, and prompt template paths. The API key is read from the `GEMINI_API_KEY` environment variable.
+* **Public Methods:**
+    * `generate_storyboard(prompt: str, num_keyframes: int, ref_image_paths: list[str])`
+        * **Inputs:**
+            * `prompt`: The overall idea of the film (string).
+            * `num_keyframes`: The number of scenes to generate (int).
+            * `ref_image_paths`: List of paths to the reference images (list[str]).
+        * **Output:** `tuple[list[str], str]` (a tuple containing the list of storyboard strings and a textual report of the operation).
+    * `select_keyframes_from_pool(storyboard: list, base_image_paths: list[str], pool_image_paths: list[str])`
+        * **Inputs:**
+            * `storyboard`: The list of generated storyboard strings.
+            * `base_image_paths`: Base reference images (list[str]).
+            * `pool_image_paths`: The "image pool" to select from (list[str]).
+        * **Output:** `tuple[list[str], str]` (a tuple containing the list of selected image paths and a textual report).
+    * `get_anticipatory_keyframe_prompt(...)`
+        * **Inputs:** Narrative and visual context used to generate an image prompt.
+        * **Output:** `tuple[str, str]` (a tuple containing the prompt generated for the image model and a textual report).
+    * `get_initial_motion_prompt(...)`
+        * **Inputs:** Narrative and visual context for the first video transition.
+        * **Output:** `tuple[str, str]` (a tuple containing the generated motion prompt and a textual report).
+    * `get_transition_decision(...)`
+        * **Inputs:** Narrative and visual context for an intermediate video transition.
+        * **Output:** `tuple[dict, str]` (a tuple containing a dictionary `{"transition_type": "...", "motion_prompt": "..."}` and a textual report).
+    * `generate_audio_prompts(...)`
+        * **Inputs:** Global narrative context.
+        * **Output:** `tuple[dict, str]` (a tuple containing a dictionary `{"music_prompt": "...", "sfx_prompt": "..."}` and a textual report).
+
+### **flux_kontext_helpers.py (FluxPoolManager)**
+
+* **Purpose:** Specialist in high-quality image (keyframe) generation using the FluxKontext pipeline. Manages a pool of workers to optimize the use of multiple GPUs.
+* **Singleton Instance:** `flux_kontext_singleton`
+* **Constructor:** `FluxPoolManager(device_ids: list[str], flux_config_file: str)`
+    * Reads `configs/flux_config.yaml`.
+* **Public Method:**
+    * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int, seed: int = 42, callback: callable = None)`
+        * **Inputs:**
+            * `prompt`: Text prompt guiding the generation (string).
+            * `reference_images`: List of `PIL.Image` objects used as visual reference.
+            * `width`, `height`: Output image dimensions (int).
+            * `seed`: Seed for reproducibility (int).
+            * `callback`: Optional callback function for monitoring progress.
+        * **Output:** `PIL.Image.Image` (the generated image object).
+
+### **dreamo_helpers.py (DreamOAgent)**
+
+* **Purpose:** Specialist in high-quality image (keyframe) generation using the DreamO pipeline, with advanced editing and style capabilities driven by references.
+* **Singleton Instance:** `dreamo_agent_singleton`
+* **Constructor:** `DreamOAgent(device_id: str = None)`
+    * Reads `configs/dreamo_config.yaml`.
+* **Public Method:**
+    * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int)`
+        * **Inputs:**
+            * `prompt`: Text prompt guiding the generation (string).
+            * `reference_images`: List of `PIL.Image` objects used as visual reference. The internal logic assigns the first image as `style` and the remaining ones as `ip`.
+            * `width`, `height`: Output image dimensions (int).
+        * **Output:** `PIL.Image.Image` (the generated image object).
+
+### **ltx_manager_helpers.py (LtxPoolManager)**
+
+* **Purpose:** Specialist in generating video fragments in latent space using the LTX-Video pipeline. Manages a pool of workers to optimize the use of multiple GPUs.
+* **Singleton Instance:** `ltx_manager_singleton`
+* **Constructor:** `LtxPoolManager(device_ids: list[str], ltx_model_config_file: str, ltx_global_config_file: str)`
+    * Reads `ltx_global_config_file` and `ltx_model_config_file` to configure the pipeline.
+* **Public Method:**
+    * `generate_latent_fragment(**kwargs)`
+        * **Inputs:** Dictionary of keyword arguments (`kwargs`) containing all LTX pipeline parameters, including:
+            * `height`, `width`: Video dimensions (int).
+            * `video_total_frames`: Total number of frames to generate (int).
+            * `video_fps`: Frames per second (int).
+            * `motion_prompt`: Motion prompt (string).
+            * `conditioning_items_data`: List of `LatentConditioningItem` objects carrying the conditioning latent tensors.
+            * `guidance_scale`, `stg_scale`, `num_inference_steps`, etc.
+        * **Output:** `tuple[torch.Tensor, tuple]` (a tuple containing the generated latent tensor and the padding values used).
+
+### **mmaudio_helper.py (MMAudioAgent)**
+
+* **Purpose:** Specialist in generating audio for a given video fragment.
+* **Singleton Instance:** `mmaudio_agent_singleton`
+* **Constructor:** `MMAudioAgent(workspace_dir: str, device_id: str = None, mmaudio_config_file: str)`
+    * Reads `configs/mmaudio_config.yaml`.
+* **Public Method:**
+    * `generate_audio_for_video(video_path: str, prompt: str, negative_prompt: str, duration_seconds: float)`
+        * **Inputs:**
+            * `video_path`: Path to the silent video file (string).
+            * `prompt`: Text prompt guiding the audio generation (string).
+            * `negative_prompt`: Negative prompt for the audio (string).
+            * `duration_seconds`: Exact duration of the video (float).
+        * **Output:** `str` (the path to the new video file with the audio track merged in).
+
+---
+
+## 🔗 Original Projects and Attributions
+(The attributions and licenses section remains the same as previously defined.)
+
+### DreamO
+* **Original Repository:** [https://github.com/bytedance/DreamO](https://github.com/bytedance/DreamO)
+...
+
+### LTX-Video
+* **Original Repository:** [https://github.com/Lightricks/LTX-Video](https://github.com/Lightricks/LTX-Video)
+...
+
+### MMAudio
+* **Original Repository:** [https://github.com/hkchengrex/MMAudio](https://github.com/hkchengrex/MMAudio)
+...
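+
+---
+
+## 🧪 Usage Sketch (illustrative)
+
+To make the contracts above concrete, the sketch below wires the documented singletons into a single generation step. It is illustrative only: the import paths and all parameter values are assumptions inferred from the helper file names and the signatures listed in this README, not a verified end-to-end script.
+
+```python
+# Illustrative wiring of the helper singletons documented above.
+# Module paths are assumed from the helper file names; adjust to the real package layout.
+from PIL import Image
+
+from gemini_helpers import gemini_agent_singleton
+from flux_kontext_helpers import flux_kontext_singleton
+from ltx_manager_helpers import ltx_manager_singleton
+from mmaudio_helper import mmaudio_agent_singleton
+
+# 1) Storyboard: returns (list of scene descriptions, textual report).
+storyboard, report = gemini_agent_singleton.generate_storyboard(
+    prompt="A lighthouse keeper weathers a storm",
+    num_keyframes=3,
+    ref_image_paths=["refs/lighthouse.png"],
+)
+
+# 2) Keyframe image for the first scene (PIL.Image objects in, PIL.Image out).
+reference = Image.open("refs/lighthouse.png")
+keyframe = flux_kontext_singleton.generate_image(
+    prompt=storyboard[0],
+    reference_images=[reference],
+    width=1024,
+    height=576,
+    seed=42,
+)
+
+# 3) Latent video fragment. conditioning_items_data is left empty here for brevity;
+#    in the real flow it carries LatentConditioningItem objects with pre-encoded latents.
+latents, padding = ltx_manager_singleton.generate_latent_fragment(
+    height=576,
+    width=1024,
+    video_total_frames=121,
+    video_fps=24,
+    motion_prompt="slow push-in towards the lighthouse",
+    conditioning_items_data=[],
+    guidance_scale=3.0,
+)
+
+# 4) After the fragment has been decoded and muxed to an .mp4 elsewhere, add audio.
+final_path = mmaudio_agent_singleton.generate_audio_for_video(
+    video_path="workspace/fragment_0.mp4",
+    prompt="storm waves and heavy rain against a lighthouse",
+    negative_prompt="speech",
+    duration_seconds=5.0,
+)
+print(final_path)
+```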
\ No newline at end of file diff --git a/ltx_video/__init__.py b/ltx_video/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/models/__init__.py b/ltx_video/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/models/autoencoders/__init__.py b/ltx_video/models/autoencoders/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/models/autoencoders/causal_conv3d.py b/ltx_video/models/autoencoders/causal_conv3d.py new file mode 100644 index 0000000000000000000000000000000000000000..98249c2f5ffe52eead83b38476e034c4f03bdccd --- /dev/null +++ b/ltx_video/models/autoencoders/causal_conv3d.py @@ -0,0 +1,63 @@ +from typing import Tuple, Union + +import torch +import torch.nn as nn + + +class CausalConv3d(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size: int = 3, + stride: Union[int, Tuple[int]] = 1, + dilation: int = 1, + groups: int = 1, + spatial_padding_mode: str = "zeros", + **kwargs, + ): + super().__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + + kernel_size = (kernel_size, kernel_size, kernel_size) + self.time_kernel_size = kernel_size[0] + + dilation = (dilation, 1, 1) + + height_pad = kernel_size[1] // 2 + width_pad = kernel_size[2] // 2 + padding = (0, height_pad, width_pad) + + self.conv = nn.Conv3d( + in_channels, + out_channels, + kernel_size, + stride=stride, + dilation=dilation, + padding=padding, + padding_mode=spatial_padding_mode, + groups=groups, + ) + + def forward(self, x, causal: bool = True): + if causal: + first_frame_pad = x[:, :, :1, :, :].repeat( + (1, 1, self.time_kernel_size - 1, 1, 1) + ) + x = torch.concatenate((first_frame_pad, x), dim=2) + else: + first_frame_pad = x[:, :, :1, :, :].repeat( + (1, 1, (self.time_kernel_size - 1) // 2, 1, 1) + ) + last_frame_pad = x[:, :, -1:, :, :].repeat( + (1, 1, (self.time_kernel_size - 1) // 2, 1, 1) + ) + x = torch.concatenate((first_frame_pad, x, last_frame_pad), dim=2) + x = self.conv(x) + return x + + @property + def weight(self): + return self.conv.weight diff --git a/ltx_video/models/autoencoders/causal_video_autoencoder.py b/ltx_video/models/autoencoders/causal_video_autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..736c96a3c65e22a7ada0bb20535e0e15bc47b123 --- /dev/null +++ b/ltx_video/models/autoencoders/causal_video_autoencoder.py @@ -0,0 +1,1398 @@ +import json +import os +from functools import partial +from types import SimpleNamespace +from typing import Any, Mapping, Optional, Tuple, Union, List +from pathlib import Path + +import torch +import numpy as np +from einops import rearrange +from torch import nn +from diffusers.utils import logging +import torch.nn.functional as F +from diffusers.models.embeddings import PixArtAlphaCombinedTimestepSizeEmbeddings +from safetensors import safe_open + + +from ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd, make_linear_nd +from ltx_video.models.autoencoders.pixel_norm import PixelNorm +from ltx_video.models.autoencoders.pixel_shuffle import PixelShuffleND +from ltx_video.models.autoencoders.vae import AutoencoderKLWrapper +from ltx_video.models.transformers.attention import Attention +from ltx_video.utils.diffusers_config_mapping import ( + diffusers_and_ours_config_mapping, + 
make_hashable_key, + VAE_KEYS_RENAME_DICT, +) + +PER_CHANNEL_STATISTICS_PREFIX = "per_channel_statistics." +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +class CausalVideoAutoencoder(AutoencoderKLWrapper): + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + *args, + **kwargs, + ): + pretrained_model_name_or_path = Path(pretrained_model_name_or_path) + if ( + pretrained_model_name_or_path.is_dir() + and (pretrained_model_name_or_path / "autoencoder.pth").exists() + ): + config_local_path = pretrained_model_name_or_path / "config.json" + config = cls.load_config(config_local_path, **kwargs) + + model_local_path = pretrained_model_name_or_path / "autoencoder.pth" + state_dict = torch.load(model_local_path, map_location=torch.device("cpu")) + + statistics_local_path = ( + pretrained_model_name_or_path / "per_channel_statistics.json" + ) + if statistics_local_path.exists(): + with open(statistics_local_path, "r") as file: + data = json.load(file) + transposed_data = list(zip(*data["data"])) + data_dict = { + col: torch.tensor(vals) + for col, vals in zip(data["columns"], transposed_data) + } + std_of_means = data_dict["std-of-means"] + mean_of_means = data_dict.get( + "mean-of-means", torch.zeros_like(data_dict["std-of-means"]) + ) + state_dict[f"{PER_CHANNEL_STATISTICS_PREFIX}std-of-means"] = ( + std_of_means + ) + state_dict[f"{PER_CHANNEL_STATISTICS_PREFIX}mean-of-means"] = ( + mean_of_means + ) + + elif pretrained_model_name_or_path.is_dir(): + config_path = pretrained_model_name_or_path / "vae" / "config.json" + with open(config_path, "r") as f: + config = make_hashable_key(json.load(f)) + + assert config in diffusers_and_ours_config_mapping, ( + "Provided diffusers checkpoint config for VAE is not suppported. " + "We only support diffusers configs found in Lightricks/LTX-Video." 
+ ) + + config = diffusers_and_ours_config_mapping[config] + + state_dict_path = ( + pretrained_model_name_or_path + / "vae" + / "diffusion_pytorch_model.safetensors" + ) + + state_dict = {} + with safe_open(state_dict_path, framework="pt", device="cpu") as f: + for k in f.keys(): + state_dict[k] = f.get_tensor(k) + for key in list(state_dict.keys()): + new_key = key + for replace_key, rename_key in VAE_KEYS_RENAME_DICT.items(): + new_key = new_key.replace(replace_key, rename_key) + + state_dict[new_key] = state_dict.pop(key) + + elif pretrained_model_name_or_path.is_file() and str( + pretrained_model_name_or_path + ).endswith(".safetensors"): + state_dict = {} + with safe_open( + pretrained_model_name_or_path, framework="pt", device="cpu" + ) as f: + metadata = f.metadata() + for k in f.keys(): + state_dict[k] = f.get_tensor(k) + configs = json.loads(metadata["config"]) + config = configs["vae"] + + video_vae = cls.from_config(config) + if "torch_dtype" in kwargs: + video_vae.to(kwargs["torch_dtype"]) + video_vae.load_state_dict(state_dict) + return video_vae + + @staticmethod + def from_config(config): + assert ( + config["_class_name"] == "CausalVideoAutoencoder" + ), "config must have _class_name=CausalVideoAutoencoder" + if isinstance(config["dims"], list): + config["dims"] = tuple(config["dims"]) + + assert config["dims"] in [2, 3, (2, 1)], "dims must be 2, 3 or (2, 1)" + + double_z = config.get("double_z", True) + latent_log_var = config.get( + "latent_log_var", "per_channel" if double_z else "none" + ) + use_quant_conv = config.get("use_quant_conv", True) + normalize_latent_channels = config.get("normalize_latent_channels", False) + + if use_quant_conv and latent_log_var in ["uniform", "constant"]: + raise ValueError( + f"latent_log_var={latent_log_var} requires use_quant_conv=False" + ) + + encoder = Encoder( + dims=config["dims"], + in_channels=config.get("in_channels", 3), + out_channels=config["latent_channels"], + blocks=config.get("encoder_blocks", config.get("blocks")), + patch_size=config.get("patch_size", 1), + latent_log_var=latent_log_var, + norm_layer=config.get("norm_layer", "group_norm"), + base_channels=config.get("encoder_base_channels", 128), + spatial_padding_mode=config.get("spatial_padding_mode", "zeros"), + ) + + decoder = Decoder( + dims=config["dims"], + in_channels=config["latent_channels"], + out_channels=config.get("out_channels", 3), + blocks=config.get("decoder_blocks", config.get("blocks")), + patch_size=config.get("patch_size", 1), + norm_layer=config.get("norm_layer", "group_norm"), + causal=config.get("causal_decoder", False), + timestep_conditioning=config.get("timestep_conditioning", False), + base_channels=config.get("decoder_base_channels", 128), + spatial_padding_mode=config.get("spatial_padding_mode", "zeros"), + ) + + dims = config["dims"] + return CausalVideoAutoencoder( + encoder=encoder, + decoder=decoder, + latent_channels=config["latent_channels"], + dims=dims, + use_quant_conv=use_quant_conv, + normalize_latent_channels=normalize_latent_channels, + ) + + @property + def config(self): + return SimpleNamespace( + _class_name="CausalVideoAutoencoder", + dims=self.dims, + in_channels=self.encoder.conv_in.in_channels // self.encoder.patch_size**2, + out_channels=self.decoder.conv_out.out_channels + // self.decoder.patch_size**2, + latent_channels=self.decoder.conv_in.in_channels, + encoder_blocks=self.encoder.blocks_desc, + decoder_blocks=self.decoder.blocks_desc, + scaling_factor=1.0, + norm_layer=self.encoder.norm_layer, + 
patch_size=self.encoder.patch_size, + latent_log_var=self.encoder.latent_log_var, + use_quant_conv=self.use_quant_conv, + causal_decoder=self.decoder.causal, + timestep_conditioning=self.decoder.timestep_conditioning, + normalize_latent_channels=self.normalize_latent_channels, + ) + + @property + def is_video_supported(self): + """ + Check if the model supports video inputs of shape (B, C, F, H, W). Otherwise, the model only supports 2D images. + """ + return self.dims != 2 + + @property + def spatial_downscale_factor(self): + return ( + 2 + ** len( + [ + block + for block in self.encoder.blocks_desc + if block[0] + in [ + "compress_space", + "compress_all", + "compress_all_res", + "compress_space_res", + ] + ] + ) + * self.encoder.patch_size + ) + + @property + def temporal_downscale_factor(self): + return 2 ** len( + [ + block + for block in self.encoder.blocks_desc + if block[0] + in [ + "compress_time", + "compress_all", + "compress_all_res", + "compress_time_res", + ] + ] + ) + + def to_json_string(self) -> str: + import json + + return json.dumps(self.config.__dict__) + + def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True): + if any([key.startswith("vae.") for key in state_dict.keys()]): + state_dict = { + key.replace("vae.", ""): value + for key, value in state_dict.items() + if key.startswith("vae.") + } + ckpt_state_dict = { + key: value + for key, value in state_dict.items() + if not key.startswith(PER_CHANNEL_STATISTICS_PREFIX) + } + + model_keys = set(name for name, _ in self.named_modules()) + + key_mapping = { + ".resnets.": ".res_blocks.", + "downsamplers.0": "downsample", + "upsamplers.0": "upsample", + } + converted_state_dict = {} + for key, value in ckpt_state_dict.items(): + for k, v in key_mapping.items(): + key = key.replace(k, v) + + key_prefix = ".".join(key.split(".")[:-1]) + if "norm" in key and key_prefix not in model_keys: + logger.info( + f"Removing key {key} from state_dict as it is not present in the model" + ) + continue + + converted_state_dict[key] = value + + super().load_state_dict(converted_state_dict, strict=strict) + + data_dict = { + key.removeprefix(PER_CHANNEL_STATISTICS_PREFIX): value + for key, value in state_dict.items() + if key.startswith(PER_CHANNEL_STATISTICS_PREFIX) + } + if len(data_dict) > 0: + self.register_buffer("std_of_means", data_dict["std-of-means"]) + self.register_buffer( + "mean_of_means", + data_dict.get( + "mean-of-means", torch.zeros_like(data_dict["std-of-means"]) + ), + ) + + def last_layer(self): + if hasattr(self.decoder, "conv_out"): + if isinstance(self.decoder.conv_out, nn.Sequential): + last_layer = self.decoder.conv_out[-1] + else: + last_layer = self.decoder.conv_out + else: + last_layer = self.decoder.layers[-1] + return last_layer + + def set_use_tpu_flash_attention(self): + for block in self.decoder.up_blocks: + if isinstance(block, UNetMidBlock3D) and block.attention_blocks: + for attention_block in block.attention_blocks: + attention_block.set_use_tpu_flash_attention() + + +class Encoder(nn.Module): + r""" + The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation. + + Args: + dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3): + The number of dimensions to use in convolutions. + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. 
+ blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`): + The blocks to use. Each block is a tuple of the block name and the number of layers. + base_channels (`int`, *optional*, defaults to 128): + The number of output channels for the first convolutional layer. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + patch_size (`int`, *optional*, defaults to 1): + The patch size to use. Should be a power of 2. + norm_layer (`str`, *optional*, defaults to `group_norm`): + The normalization layer to use. Can be either `group_norm` or `pixel_norm`. + latent_log_var (`str`, *optional*, defaults to `per_channel`): + The number of channels for the log variance. Can be either `per_channel`, `uniform`, `constant` or `none`. + """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]] = 3, + in_channels: int = 3, + out_channels: int = 3, + blocks: List[Tuple[str, int | dict]] = [("res_x", 1)], + base_channels: int = 128, + norm_num_groups: int = 32, + patch_size: Union[int, Tuple[int]] = 1, + norm_layer: str = "group_norm", # group_norm, pixel_norm + latent_log_var: str = "per_channel", + spatial_padding_mode: str = "zeros", + ): + super().__init__() + self.patch_size = patch_size + self.norm_layer = norm_layer + self.latent_channels = out_channels + self.latent_log_var = latent_log_var + self.blocks_desc = blocks + + in_channels = in_channels * patch_size**2 + output_channel = base_channels + + self.conv_in = make_conv_nd( + dims=dims, + in_channels=in_channels, + out_channels=output_channel, + kernel_size=3, + stride=1, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + self.down_blocks = nn.ModuleList([]) + + for block_name, block_params in blocks: + input_channel = output_channel + if isinstance(block_params, int): + block_params = {"num_layers": block_params} + + if block_name == "res_x": + block = UNetMidBlock3D( + dims=dims, + in_channels=input_channel, + num_layers=block_params["num_layers"], + resnet_eps=1e-6, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "res_x_y": + output_channel = block_params.get("multiplier", 2) * output_channel + block = ResnetBlock3D( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + eps=1e-6, + groups=norm_num_groups, + norm_layer=norm_layer, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_time": + block = make_conv_nd( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + kernel_size=3, + stride=(2, 1, 1), + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_space": + block = make_conv_nd( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + kernel_size=3, + stride=(1, 2, 2), + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_all": + block = make_conv_nd( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + kernel_size=3, + stride=(2, 2, 2), + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_all_x_y": + output_channel = block_params.get("multiplier", 2) * output_channel + block = make_conv_nd( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + kernel_size=3, + stride=(2, 2, 2), + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_all_res": + output_channel = 
block_params.get("multiplier", 2) * output_channel + block = SpaceToDepthDownsample( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + stride=(2, 2, 2), + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_space_res": + output_channel = block_params.get("multiplier", 2) * output_channel + block = SpaceToDepthDownsample( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + stride=(1, 2, 2), + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_time_res": + output_channel = block_params.get("multiplier", 2) * output_channel + block = SpaceToDepthDownsample( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + stride=(2, 1, 1), + spatial_padding_mode=spatial_padding_mode, + ) + else: + raise ValueError(f"unknown block: {block_name}") + + self.down_blocks.append(block) + + # out + if norm_layer == "group_norm": + self.conv_norm_out = nn.GroupNorm( + num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6 + ) + elif norm_layer == "pixel_norm": + self.conv_norm_out = PixelNorm() + elif norm_layer == "layer_norm": + self.conv_norm_out = LayerNorm(output_channel, eps=1e-6) + + self.conv_act = nn.SiLU() + + conv_out_channels = out_channels + if latent_log_var == "per_channel": + conv_out_channels *= 2 + elif latent_log_var == "uniform": + conv_out_channels += 1 + elif latent_log_var == "constant": + conv_out_channels += 1 + elif latent_log_var != "none": + raise ValueError(f"Invalid latent_log_var: {latent_log_var}") + self.conv_out = make_conv_nd( + dims, + output_channel, + conv_out_channels, + 3, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + self.gradient_checkpointing = False + + def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor: + r"""The forward method of the `Encoder` class.""" + + sample = patchify(sample, patch_size_hw=self.patch_size, patch_size_t=1) + sample = self.conv_in(sample) + + checkpoint_fn = ( + partial(torch.utils.checkpoint.checkpoint, use_reentrant=False) + if self.gradient_checkpointing and self.training + else lambda x: x + ) + + for down_block in self.down_blocks: + sample = checkpoint_fn(down_block)(sample) + + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if self.latent_log_var == "uniform": + last_channel = sample[:, -1:, ...] + num_dims = sample.dim() + + if num_dims == 4: + # For shape (B, C, H, W) + repeated_last_channel = last_channel.repeat( + 1, sample.shape[1] - 2, 1, 1 + ) + sample = torch.cat([sample, repeated_last_channel], dim=1) + elif num_dims == 5: + # For shape (B, C, F, H, W) + repeated_last_channel = last_channel.repeat( + 1, sample.shape[1] - 2, 1, 1, 1 + ) + sample = torch.cat([sample, repeated_last_channel], dim=1) + else: + raise ValueError(f"Invalid input shape: {sample.shape}") + elif self.latent_log_var == "constant": + sample = sample[:, :-1, ...] + approx_ln_0 = ( + -30 + ) # this is the minimal clamp value in DiagonalGaussianDistribution objects + sample = torch.cat( + [sample, torch.ones_like(sample, device=sample.device) * approx_ln_0], + dim=1, + ) + + return sample + + +class Decoder(nn.Module): + r""" + The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample. + + Args: + dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3): + The number of dimensions to use in convolutions. 
+ in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`): + The blocks to use. Each block is a tuple of the block name and the number of layers. + base_channels (`int`, *optional*, defaults to 128): + The number of output channels for the first convolutional layer. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + patch_size (`int`, *optional*, defaults to 1): + The patch size to use. Should be a power of 2. + norm_layer (`str`, *optional*, defaults to `group_norm`): + The normalization layer to use. Can be either `group_norm` or `pixel_norm`. + causal (`bool`, *optional*, defaults to `True`): + Whether to use causal convolutions or not. + """ + + def __init__( + self, + dims, + in_channels: int = 3, + out_channels: int = 3, + blocks: List[Tuple[str, int | dict]] = [("res_x", 1)], + base_channels: int = 128, + layers_per_block: int = 2, + norm_num_groups: int = 32, + patch_size: int = 1, + norm_layer: str = "group_norm", + causal: bool = True, + timestep_conditioning: bool = False, + spatial_padding_mode: str = "zeros", + ): + super().__init__() + self.patch_size = patch_size + self.layers_per_block = layers_per_block + out_channels = out_channels * patch_size**2 + self.causal = causal + self.blocks_desc = blocks + + # Compute output channel to be product of all channel-multiplier blocks + output_channel = base_channels + for block_name, block_params in list(reversed(blocks)): + block_params = block_params if isinstance(block_params, dict) else {} + if block_name == "res_x_y": + output_channel = output_channel * block_params.get("multiplier", 2) + if block_name.startswith("compress"): + output_channel = output_channel * block_params.get("multiplier", 1) + + self.conv_in = make_conv_nd( + dims, + in_channels, + output_channel, + kernel_size=3, + stride=1, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + self.up_blocks = nn.ModuleList([]) + + for block_name, block_params in list(reversed(blocks)): + input_channel = output_channel + if isinstance(block_params, int): + block_params = {"num_layers": block_params} + + if block_name == "res_x": + block = UNetMidBlock3D( + dims=dims, + in_channels=input_channel, + num_layers=block_params["num_layers"], + resnet_eps=1e-6, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + inject_noise=block_params.get("inject_noise", False), + timestep_conditioning=timestep_conditioning, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "attn_res_x": + block = UNetMidBlock3D( + dims=dims, + in_channels=input_channel, + num_layers=block_params["num_layers"], + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + inject_noise=block_params.get("inject_noise", False), + timestep_conditioning=timestep_conditioning, + attention_head_dim=block_params["attention_head_dim"], + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "res_x_y": + output_channel = output_channel // block_params.get("multiplier", 2) + block = ResnetBlock3D( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + eps=1e-6, + groups=norm_num_groups, + norm_layer=norm_layer, + inject_noise=block_params.get("inject_noise", False), + timestep_conditioning=False, + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_time": + block = 
DepthToSpaceUpsample( + dims=dims, + in_channels=input_channel, + stride=(2, 1, 1), + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_space": + block = DepthToSpaceUpsample( + dims=dims, + in_channels=input_channel, + stride=(1, 2, 2), + spatial_padding_mode=spatial_padding_mode, + ) + elif block_name == "compress_all": + output_channel = output_channel // block_params.get("multiplier", 1) + block = DepthToSpaceUpsample( + dims=dims, + in_channels=input_channel, + stride=(2, 2, 2), + residual=block_params.get("residual", False), + out_channels_reduction_factor=block_params.get("multiplier", 1), + spatial_padding_mode=spatial_padding_mode, + ) + else: + raise ValueError(f"unknown layer: {block_name}") + + self.up_blocks.append(block) + + if norm_layer == "group_norm": + self.conv_norm_out = nn.GroupNorm( + num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6 + ) + elif norm_layer == "pixel_norm": + self.conv_norm_out = PixelNorm() + elif norm_layer == "layer_norm": + self.conv_norm_out = LayerNorm(output_channel, eps=1e-6) + + self.conv_act = nn.SiLU() + self.conv_out = make_conv_nd( + dims, + output_channel, + out_channels, + 3, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + self.gradient_checkpointing = False + + self.timestep_conditioning = timestep_conditioning + + if timestep_conditioning: + self.timestep_scale_multiplier = nn.Parameter( + torch.tensor(1000.0, dtype=torch.float32) + ) + self.last_time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings( + output_channel * 2, 0 + ) + self.last_scale_shift_table = nn.Parameter( + torch.randn(2, output_channel) / output_channel**0.5 + ) + + def forward( + self, + sample: torch.FloatTensor, + target_shape, + timestep: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + r"""The forward method of the `Decoder` class.""" + assert target_shape is not None, "target_shape must be provided" + batch_size = sample.shape[0] + + sample = self.conv_in(sample, causal=self.causal) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + + checkpoint_fn = ( + partial(torch.utils.checkpoint.checkpoint, use_reentrant=False) + if self.gradient_checkpointing and self.training + else lambda x: x + ) + + sample = sample.to(upscale_dtype) + + if self.timestep_conditioning: + assert ( + timestep is not None + ), "should pass timestep with timestep_conditioning=True" + scaled_timestep = timestep * self.timestep_scale_multiplier + + for up_block in self.up_blocks: + if self.timestep_conditioning and isinstance(up_block, UNetMidBlock3D): + sample = checkpoint_fn(up_block)( + sample, causal=self.causal, timestep=scaled_timestep + ) + else: + sample = checkpoint_fn(up_block)(sample, causal=self.causal) + + sample = self.conv_norm_out(sample) + + if self.timestep_conditioning: + embedded_timestep = self.last_time_embedder( + timestep=scaled_timestep.flatten(), + resolution=None, + aspect_ratio=None, + batch_size=sample.shape[0], + hidden_dtype=sample.dtype, + ) + embedded_timestep = embedded_timestep.view( + batch_size, embedded_timestep.shape[-1], 1, 1, 1 + ) + ada_values = self.last_scale_shift_table[ + None, ..., None, None, None + ] + embedded_timestep.reshape( + batch_size, + 2, + -1, + embedded_timestep.shape[-3], + embedded_timestep.shape[-2], + embedded_timestep.shape[-1], + ) + shift, scale = ada_values.unbind(dim=1) + sample = sample * (1 + scale) + shift + + sample = self.conv_act(sample) + sample = self.conv_out(sample, causal=self.causal) + + sample = 
unpatchify(sample, patch_size_hw=self.patch_size, patch_size_t=1) + + return sample + + +class UNetMidBlock3D(nn.Module): + """ + A 3D UNet mid-block [`UNetMidBlock3D`] with multiple residual blocks. + + Args: + in_channels (`int`): The number of input channels. + dropout (`float`, *optional*, defaults to 0.0): The dropout rate. + num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. + resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. + resnet_groups (`int`, *optional*, defaults to 32): + The number of groups to use in the group normalization layers of the resnet blocks. + norm_layer (`str`, *optional*, defaults to `group_norm`): + The normalization layer to use. Can be either `group_norm` or `pixel_norm`. + inject_noise (`bool`, *optional*, defaults to `False`): + Whether to inject noise into the hidden states. + timestep_conditioning (`bool`, *optional*, defaults to `False`): + Whether to condition the hidden states on the timestep. + attention_head_dim (`int`, *optional*, defaults to -1): + The dimension of the attention head. If -1, no attention is used. + + Returns: + `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size, + in_channels, height, width)`. + + """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_groups: int = 32, + norm_layer: str = "group_norm", + inject_noise: bool = False, + timestep_conditioning: bool = False, + attention_head_dim: int = -1, + spatial_padding_mode: str = "zeros", + ): + super().__init__() + resnet_groups = ( + resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + ) + self.timestep_conditioning = timestep_conditioning + + if timestep_conditioning: + self.time_embedder = PixArtAlphaCombinedTimestepSizeEmbeddings( + in_channels * 4, 0 + ) + + self.res_blocks = nn.ModuleList( + [ + ResnetBlock3D( + dims=dims, + in_channels=in_channels, + out_channels=in_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + norm_layer=norm_layer, + inject_noise=inject_noise, + timestep_conditioning=timestep_conditioning, + spatial_padding_mode=spatial_padding_mode, + ) + for _ in range(num_layers) + ] + ) + + self.attention_blocks = None + + if attention_head_dim > 0: + if attention_head_dim > in_channels: + raise ValueError( + "attention_head_dim must be less than or equal to in_channels" + ) + + self.attention_blocks = nn.ModuleList( + [ + Attention( + query_dim=in_channels, + heads=in_channels // attention_head_dim, + dim_head=attention_head_dim, + bias=True, + out_bias=True, + qk_norm="rms_norm", + residual_connection=True, + ) + for _ in range(num_layers) + ] + ) + + def forward( + self, + hidden_states: torch.FloatTensor, + causal: bool = True, + timestep: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + timestep_embed = None + if self.timestep_conditioning: + assert ( + timestep is not None + ), "should pass timestep with timestep_conditioning=True" + batch_size = hidden_states.shape[0] + timestep_embed = self.time_embedder( + timestep=timestep.flatten(), + resolution=None, + aspect_ratio=None, + batch_size=batch_size, + hidden_dtype=hidden_states.dtype, + ) + timestep_embed = timestep_embed.view( + batch_size, timestep_embed.shape[-1], 1, 1, 1 + ) + + if self.attention_blocks: + for resnet, attention in zip(self.res_blocks, self.attention_blocks): + hidden_states = resnet( + hidden_states, 
causal=causal, timestep=timestep_embed + ) + + # Reshape the hidden states to be (batch_size, frames * height * width, channel) + batch_size, channel, frames, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, frames * height * width + ).transpose(1, 2) + + if attention.use_tpu_flash_attention: + # Pad the second dimension to be divisible by block_k_major (block in flash attention) + seq_len = hidden_states.shape[1] + block_k_major = 512 + pad_len = (block_k_major - seq_len % block_k_major) % block_k_major + if pad_len > 0: + hidden_states = F.pad( + hidden_states, (0, 0, 0, pad_len), "constant", 0 + ) + + # Create a mask with ones for the original sequence length and zeros for the padded indexes + mask = torch.ones( + (hidden_states.shape[0], seq_len), + device=hidden_states.device, + dtype=hidden_states.dtype, + ) + if pad_len > 0: + mask = F.pad(mask, (0, pad_len), "constant", 0) + + hidden_states = attention( + hidden_states, + attention_mask=( + None if not attention.use_tpu_flash_attention else mask + ), + ) + + if attention.use_tpu_flash_attention: + # Remove the padding + if pad_len > 0: + hidden_states = hidden_states[:, :-pad_len, :] + + # Reshape the hidden states back to (batch_size, channel, frames, height, width, channel) + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, frames, height, width + ) + else: + for resnet in self.res_blocks: + hidden_states = resnet( + hidden_states, causal=causal, timestep=timestep_embed + ) + + return hidden_states + + +class SpaceToDepthDownsample(nn.Module): + def __init__(self, dims, in_channels, out_channels, stride, spatial_padding_mode): + super().__init__() + self.stride = stride + self.group_size = in_channels * np.prod(stride) // out_channels + self.conv = make_conv_nd( + dims=dims, + in_channels=in_channels, + out_channels=out_channels // np.prod(stride), + kernel_size=3, + stride=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + def forward(self, x, causal: bool = True): + if self.stride[0] == 2: + x = torch.cat( + [x[:, :, :1, :, :], x], dim=2 + ) # duplicate first frames for padding + + # skip connection + x_in = rearrange( + x, + "b c (d p1) (h p2) (w p3) -> b (c p1 p2 p3) d h w", + p1=self.stride[0], + p2=self.stride[1], + p3=self.stride[2], + ) + x_in = rearrange(x_in, "b (c g) d h w -> b c g d h w", g=self.group_size) + x_in = x_in.mean(dim=2) + + # conv + x = self.conv(x, causal=causal) + x = rearrange( + x, + "b c (d p1) (h p2) (w p3) -> b (c p1 p2 p3) d h w", + p1=self.stride[0], + p2=self.stride[1], + p3=self.stride[2], + ) + + x = x + x_in + + return x + + +class DepthToSpaceUpsample(nn.Module): + def __init__( + self, + dims, + in_channels, + stride, + residual=False, + out_channels_reduction_factor=1, + spatial_padding_mode="zeros", + ): + super().__init__() + self.stride = stride + self.out_channels = ( + np.prod(stride) * in_channels // out_channels_reduction_factor + ) + self.conv = make_conv_nd( + dims=dims, + in_channels=in_channels, + out_channels=self.out_channels, + kernel_size=3, + stride=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + self.pixel_shuffle = PixelShuffleND(dims=dims, upscale_factors=stride) + self.residual = residual + self.out_channels_reduction_factor = out_channels_reduction_factor + + def forward(self, x, causal: bool = True): + if self.residual: + # Reshape and duplicate the input to match the output shape + x_in = self.pixel_shuffle(x) + num_repeat = np.prod(self.stride) // 
self.out_channels_reduction_factor + x_in = x_in.repeat(1, num_repeat, 1, 1, 1) + if self.stride[0] == 2: + x_in = x_in[:, :, 1:, :, :] + x = self.conv(x, causal=causal) + x = self.pixel_shuffle(x) + if self.stride[0] == 2: + x = x[:, :, 1:, :, :] + if self.residual: + x = x + x_in + return x + + +class LayerNorm(nn.Module): + def __init__(self, dim, eps, elementwise_affine=True) -> None: + super().__init__() + self.norm = nn.LayerNorm(dim, eps=eps, elementwise_affine=elementwise_affine) + + def forward(self, x): + x = rearrange(x, "b c d h w -> b d h w c") + x = self.norm(x) + x = rearrange(x, "b d h w c -> b c d h w") + return x + + +class ResnetBlock3D(nn.Module): + r""" + A Resnet block. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv layer. If None, same as `in_channels`. + dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. + groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. + """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + out_channels: Optional[int] = None, + dropout: float = 0.0, + groups: int = 32, + eps: float = 1e-6, + norm_layer: str = "group_norm", + inject_noise: bool = False, + timestep_conditioning: bool = False, + spatial_padding_mode: str = "zeros", + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.inject_noise = inject_noise + + if norm_layer == "group_norm": + self.norm1 = nn.GroupNorm( + num_groups=groups, num_channels=in_channels, eps=eps, affine=True + ) + elif norm_layer == "pixel_norm": + self.norm1 = PixelNorm() + elif norm_layer == "layer_norm": + self.norm1 = LayerNorm(in_channels, eps=eps, elementwise_affine=True) + + self.non_linearity = nn.SiLU() + + self.conv1 = make_conv_nd( + dims, + in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + if inject_noise: + self.per_channel_scale1 = nn.Parameter(torch.zeros((in_channels, 1, 1))) + + if norm_layer == "group_norm": + self.norm2 = nn.GroupNorm( + num_groups=groups, num_channels=out_channels, eps=eps, affine=True + ) + elif norm_layer == "pixel_norm": + self.norm2 = PixelNorm() + elif norm_layer == "layer_norm": + self.norm2 = LayerNorm(out_channels, eps=eps, elementwise_affine=True) + + self.dropout = torch.nn.Dropout(dropout) + + self.conv2 = make_conv_nd( + dims, + out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1, + causal=True, + spatial_padding_mode=spatial_padding_mode, + ) + + if inject_noise: + self.per_channel_scale2 = nn.Parameter(torch.zeros((in_channels, 1, 1))) + + self.conv_shortcut = ( + make_linear_nd( + dims=dims, in_channels=in_channels, out_channels=out_channels + ) + if in_channels != out_channels + else nn.Identity() + ) + + self.norm3 = ( + LayerNorm(in_channels, eps=eps, elementwise_affine=True) + if in_channels != out_channels + else nn.Identity() + ) + + self.timestep_conditioning = timestep_conditioning + + if timestep_conditioning: + self.scale_shift_table = nn.Parameter( + torch.randn(4, in_channels) / in_channels**0.5 + ) + + def _feed_spatial_noise( + self, hidden_states: torch.FloatTensor, 
per_channel_scale: torch.FloatTensor + ) -> torch.FloatTensor: + spatial_shape = hidden_states.shape[-2:] + device = hidden_states.device + dtype = hidden_states.dtype + + # similar to the "explicit noise inputs" method in style-gan + spatial_noise = torch.randn(spatial_shape, device=device, dtype=dtype)[None] + scaled_noise = (spatial_noise * per_channel_scale)[None, :, None, ...] + hidden_states = hidden_states + scaled_noise + + return hidden_states + + def forward( + self, + input_tensor: torch.FloatTensor, + causal: bool = True, + timestep: Optional[torch.Tensor] = None, + ) -> torch.FloatTensor: + hidden_states = input_tensor + batch_size = hidden_states.shape[0] + + hidden_states = self.norm1(hidden_states) + if self.timestep_conditioning: + assert ( + timestep is not None + ), "should pass timestep with timestep_conditioning=True" + ada_values = self.scale_shift_table[ + None, ..., None, None, None + ] + timestep.reshape( + batch_size, + 4, + -1, + timestep.shape[-3], + timestep.shape[-2], + timestep.shape[-1], + ) + shift1, scale1, shift2, scale2 = ada_values.unbind(dim=1) + + hidden_states = hidden_states * (1 + scale1) + shift1 + + hidden_states = self.non_linearity(hidden_states) + + hidden_states = self.conv1(hidden_states, causal=causal) + + if self.inject_noise: + hidden_states = self._feed_spatial_noise( + hidden_states, self.per_channel_scale1 + ) + + hidden_states = self.norm2(hidden_states) + + if self.timestep_conditioning: + hidden_states = hidden_states * (1 + scale2) + shift2 + + hidden_states = self.non_linearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + + hidden_states = self.conv2(hidden_states, causal=causal) + + if self.inject_noise: + hidden_states = self._feed_spatial_noise( + hidden_states, self.per_channel_scale2 + ) + + input_tensor = self.norm3(input_tensor) + + batch_size = input_tensor.shape[0] + + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = input_tensor + hidden_states + + return output_tensor + + +def patchify(x, patch_size_hw, patch_size_t=1): + if patch_size_hw == 1 and patch_size_t == 1: + return x + if x.dim() == 4: + x = rearrange( + x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size_hw, r=patch_size_hw + ) + elif x.dim() == 5: + x = rearrange( + x, + "b c (f p) (h q) (w r) -> b (c p r q) f h w", + p=patch_size_t, + q=patch_size_hw, + r=patch_size_hw, + ) + else: + raise ValueError(f"Invalid input shape: {x.shape}") + + return x + + +def unpatchify(x, patch_size_hw, patch_size_t=1): + if patch_size_hw == 1 and patch_size_t == 1: + return x + + if x.dim() == 4: + x = rearrange( + x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size_hw, r=patch_size_hw + ) + elif x.dim() == 5: + x = rearrange( + x, + "b (c p r q) f h w -> b c (f p) (h q) (w r)", + p=patch_size_t, + q=patch_size_hw, + r=patch_size_hw, + ) + + return x + + +def create_video_autoencoder_demo_config( + latent_channels: int = 64, +): + encoder_blocks = [ + ("res_x", {"num_layers": 2}), + ("compress_space_res", {"multiplier": 2}), + ("compress_time_res", {"multiplier": 2}), + ("compress_all_res", {"multiplier": 2}), + ("compress_all_res", {"multiplier": 2}), + ("res_x", {"num_layers": 1}), + ] + decoder_blocks = [ + ("res_x", {"num_layers": 2, "inject_noise": False}), + ("compress_all", {"residual": True, "multiplier": 2}), + ("compress_all", {"residual": True, "multiplier": 2}), + ("compress_all", {"residual": True, "multiplier": 2}), + ("res_x", {"num_layers": 2, "inject_noise": False}), + ] + return { + "_class_name": 
"CausalVideoAutoencoder", + "dims": 3, + "encoder_blocks": encoder_blocks, + "decoder_blocks": decoder_blocks, + "latent_channels": latent_channels, + "norm_layer": "pixel_norm", + "patch_size": 4, + "latent_log_var": "uniform", + "use_quant_conv": False, + "causal_decoder": False, + "timestep_conditioning": True, + "spatial_padding_mode": "replicate", + } + + +def test_vae_patchify_unpatchify(): + import torch + + x = torch.randn(2, 3, 8, 64, 64) + x_patched = patchify(x, patch_size_hw=4, patch_size_t=4) + x_unpatched = unpatchify(x_patched, patch_size_hw=4, patch_size_t=4) + assert torch.allclose(x, x_unpatched) + + +def demo_video_autoencoder_forward_backward(): + # Configuration for the VideoAutoencoder + config = create_video_autoencoder_demo_config() + + # Instantiate the VideoAutoencoder with the specified configuration + video_autoencoder = CausalVideoAutoencoder.from_config(config) + + print(video_autoencoder) + video_autoencoder.eval() + # Print the total number of parameters in the video autoencoder + total_params = sum(p.numel() for p in video_autoencoder.parameters()) + print(f"Total number of parameters in VideoAutoencoder: {total_params:,}") + + # Create a mock input tensor simulating a batch of videos + # Shape: (batch_size, channels, depth, height, width) + # E.g., 4 videos, each with 3 color channels, 16 frames, and 64x64 pixels per frame + input_videos = torch.randn(2, 3, 17, 64, 64) + + # Forward pass: encode and decode the input videos + latent = video_autoencoder.encode(input_videos).latent_dist.mode() + print(f"input shape={input_videos.shape}") + print(f"latent shape={latent.shape}") + + timestep = torch.ones(input_videos.shape[0]) * 0.1 + reconstructed_videos = video_autoencoder.decode( + latent, target_shape=input_videos.shape, timestep=timestep + ).sample + + print(f"reconstructed shape={reconstructed_videos.shape}") + + # Validate that single image gets treated the same way as first frame + input_image = input_videos[:, :, :1, :, :] + image_latent = video_autoencoder.encode(input_image).latent_dist.mode() + _ = video_autoencoder.decode( + image_latent, target_shape=image_latent.shape, timestep=timestep + ).sample + + first_frame_latent = latent[:, :, :1, :, :] + + assert torch.allclose(image_latent, first_frame_latent, atol=1e-6) + # assert torch.allclose(reconstructed_image, reconstructed_videos[:, :, :1, :, :], atol=1e-6) + # assert torch.allclose(image_latent, first_frame_latent, atol=1e-6) + # assert (reconstructed_image == reconstructed_videos[:, :, :1, :, :]).all() + + # Calculate the loss (e.g., mean squared error) + loss = torch.nn.functional.mse_loss(input_videos, reconstructed_videos) + + # Perform backward pass + loss.backward() + + print(f"Demo completed with loss: {loss.item()}") + + +# Ensure to call the demo function to execute the forward and backward pass +if __name__ == "__main__": + demo_video_autoencoder_forward_backward() diff --git a/ltx_video/models/autoencoders/conv_nd_factory.py b/ltx_video/models/autoencoders/conv_nd_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..718c69befd959c7466c4a57d71e46bb80bfe9fba --- /dev/null +++ b/ltx_video/models/autoencoders/conv_nd_factory.py @@ -0,0 +1,90 @@ +from typing import Tuple, Union + +import torch + +from ltx_video.models.autoencoders.dual_conv3d import DualConv3d +from ltx_video.models.autoencoders.causal_conv3d import CausalConv3d + + +def make_conv_nd( + dims: Union[int, Tuple[int, int]], + in_channels: int, + out_channels: int, + kernel_size: int, + stride=1, + 
padding=0, + dilation=1, + groups=1, + bias=True, + causal=False, + spatial_padding_mode="zeros", + temporal_padding_mode="zeros", +): + if not (spatial_padding_mode == temporal_padding_mode or causal): + raise NotImplementedError("spatial and temporal padding modes must be equal") + if dims == 2: + return torch.nn.Conv2d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=spatial_padding_mode, + ) + elif dims == 3: + if causal: + return CausalConv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + spatial_padding_mode=spatial_padding_mode, + ) + return torch.nn.Conv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=groups, + bias=bias, + padding_mode=spatial_padding_mode, + ) + elif dims == (2, 1): + return DualConv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias, + padding_mode=spatial_padding_mode, + ) + else: + raise ValueError(f"unsupported dimensions: {dims}") + + +def make_linear_nd( + dims: int, + in_channels: int, + out_channels: int, + bias=True, +): + if dims == 2: + return torch.nn.Conv2d( + in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias + ) + elif dims == 3 or dims == (2, 1): + return torch.nn.Conv3d( + in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias + ) + else: + raise ValueError(f"unsupported dimensions: {dims}") diff --git a/ltx_video/models/autoencoders/dual_conv3d.py b/ltx_video/models/autoencoders/dual_conv3d.py new file mode 100644 index 0000000000000000000000000000000000000000..dcf889296750d3d7e553af37ecf77d1b10245af3 --- /dev/null +++ b/ltx_video/models/autoencoders/dual_conv3d.py @@ -0,0 +1,217 @@ +import math +from typing import Tuple, Union + +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + + +class DualConv3d(nn.Module): + def __init__( + self, + in_channels, + out_channels, + kernel_size, + stride: Union[int, Tuple[int, int, int]] = 1, + padding: Union[int, Tuple[int, int, int]] = 0, + dilation: Union[int, Tuple[int, int, int]] = 1, + groups=1, + bias=True, + padding_mode="zeros", + ): + super(DualConv3d, self).__init__() + + self.in_channels = in_channels + self.out_channels = out_channels + self.padding_mode = padding_mode + # Ensure kernel_size, stride, padding, and dilation are tuples of length 3 + if isinstance(kernel_size, int): + kernel_size = (kernel_size, kernel_size, kernel_size) + if kernel_size == (1, 1, 1): + raise ValueError( + "kernel_size must be greater than 1. Use make_linear_nd instead." 
+ ) + if isinstance(stride, int): + stride = (stride, stride, stride) + if isinstance(padding, int): + padding = (padding, padding, padding) + if isinstance(dilation, int): + dilation = (dilation, dilation, dilation) + + # Set parameters for convolutions + self.groups = groups + self.bias = bias + + # Define the size of the channels after the first convolution + intermediate_channels = ( + out_channels if in_channels < out_channels else in_channels + ) + + # Define parameters for the first convolution + self.weight1 = nn.Parameter( + torch.Tensor( + intermediate_channels, + in_channels // groups, + 1, + kernel_size[1], + kernel_size[2], + ) + ) + self.stride1 = (1, stride[1], stride[2]) + self.padding1 = (0, padding[1], padding[2]) + self.dilation1 = (1, dilation[1], dilation[2]) + if bias: + self.bias1 = nn.Parameter(torch.Tensor(intermediate_channels)) + else: + self.register_parameter("bias1", None) + + # Define parameters for the second convolution + self.weight2 = nn.Parameter( + torch.Tensor( + out_channels, intermediate_channels // groups, kernel_size[0], 1, 1 + ) + ) + self.stride2 = (stride[0], 1, 1) + self.padding2 = (padding[0], 0, 0) + self.dilation2 = (dilation[0], 1, 1) + if bias: + self.bias2 = nn.Parameter(torch.Tensor(out_channels)) + else: + self.register_parameter("bias2", None) + + # Initialize weights and biases + self.reset_parameters() + + def reset_parameters(self): + nn.init.kaiming_uniform_(self.weight1, a=math.sqrt(5)) + nn.init.kaiming_uniform_(self.weight2, a=math.sqrt(5)) + if self.bias: + fan_in1, _ = nn.init._calculate_fan_in_and_fan_out(self.weight1) + bound1 = 1 / math.sqrt(fan_in1) + nn.init.uniform_(self.bias1, -bound1, bound1) + fan_in2, _ = nn.init._calculate_fan_in_and_fan_out(self.weight2) + bound2 = 1 / math.sqrt(fan_in2) + nn.init.uniform_(self.bias2, -bound2, bound2) + + def forward(self, x, use_conv3d=False, skip_time_conv=False): + if use_conv3d: + return self.forward_with_3d(x=x, skip_time_conv=skip_time_conv) + else: + return self.forward_with_2d(x=x, skip_time_conv=skip_time_conv) + + def forward_with_3d(self, x, skip_time_conv): + # First convolution + x = F.conv3d( + x, + self.weight1, + self.bias1, + self.stride1, + self.padding1, + self.dilation1, + self.groups, + padding_mode=self.padding_mode, + ) + + if skip_time_conv: + return x + + # Second convolution + x = F.conv3d( + x, + self.weight2, + self.bias2, + self.stride2, + self.padding2, + self.dilation2, + self.groups, + padding_mode=self.padding_mode, + ) + + return x + + def forward_with_2d(self, x, skip_time_conv): + b, c, d, h, w = x.shape + + # First 2D convolution + x = rearrange(x, "b c d h w -> (b d) c h w") + # Squeeze the depth dimension out of weight1 since it's 1 + weight1 = self.weight1.squeeze(2) + # Select stride, padding, and dilation for the 2D convolution + stride1 = (self.stride1[1], self.stride1[2]) + padding1 = (self.padding1[1], self.padding1[2]) + dilation1 = (self.dilation1[1], self.dilation1[2]) + x = F.conv2d( + x, + weight1, + self.bias1, + stride1, + padding1, + dilation1, + self.groups, + padding_mode=self.padding_mode, + ) + + _, _, h, w = x.shape + + if skip_time_conv: + x = rearrange(x, "(b d) c h w -> b c d h w", b=b) + return x + + # Second convolution which is essentially treated as a 1D convolution across the 'd' dimension + x = rearrange(x, "(b d) c h w -> (b h w) c d", b=b) + + # Reshape weight2 to match the expected dimensions for conv1d + weight2 = self.weight2.squeeze(-1).squeeze(-1) + # Use only the relevant dimension for stride, padding, and 
dilation for the 1D convolution + stride2 = self.stride2[0] + padding2 = self.padding2[0] + dilation2 = self.dilation2[0] + x = F.conv1d( + x, + weight2, + self.bias2, + stride2, + padding2, + dilation2, + self.groups, + padding_mode=self.padding_mode, + ) + x = rearrange(x, "(b h w) c d -> b c d h w", b=b, h=h, w=w) + + return x + + @property + def weight(self): + return self.weight2 + + +def test_dual_conv3d_consistency(): + # Initialize parameters + in_channels = 3 + out_channels = 5 + kernel_size = (3, 3, 3) + stride = (2, 2, 2) + padding = (1, 1, 1) + + # Create an instance of the DualConv3d class + dual_conv3d = DualConv3d( + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=True, + ) + + # Example input tensor + test_input = torch.randn(1, 3, 10, 10, 10) + + # Perform forward passes with both 3D and 2D settings + output_conv3d = dual_conv3d(test_input, use_conv3d=True) + output_2d = dual_conv3d(test_input, use_conv3d=False) + + # Assert that the outputs from both methods are sufficiently close + assert torch.allclose( + output_conv3d, output_2d, atol=1e-6 + ), "Outputs are not consistent between 3D and 2D convolutions." diff --git a/ltx_video/models/autoencoders/latent_upsampler.py b/ltx_video/models/autoencoders/latent_upsampler.py new file mode 100644 index 0000000000000000000000000000000000000000..4a76bc21d1a503d61dec673cf5cb980bb6d703fd --- /dev/null +++ b/ltx_video/models/autoencoders/latent_upsampler.py @@ -0,0 +1,203 @@ +from typing import Optional, Union +from pathlib import Path +import os +import json + +import torch +import torch.nn as nn +from einops import rearrange +from diffusers import ConfigMixin, ModelMixin +from safetensors.torch import safe_open + +from ltx_video.models.autoencoders.pixel_shuffle import PixelShuffleND + + +class ResBlock(nn.Module): + def __init__( + self, channels: int, mid_channels: Optional[int] = None, dims: int = 3 + ): + super().__init__() + if mid_channels is None: + mid_channels = channels + + Conv = nn.Conv2d if dims == 2 else nn.Conv3d + + self.conv1 = Conv(channels, mid_channels, kernel_size=3, padding=1) + self.norm1 = nn.GroupNorm(32, mid_channels) + self.conv2 = Conv(mid_channels, channels, kernel_size=3, padding=1) + self.norm2 = nn.GroupNorm(32, channels) + self.activation = nn.SiLU() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + residual = x + x = self.conv1(x) + x = self.norm1(x) + x = self.activation(x) + x = self.conv2(x) + x = self.norm2(x) + x = self.activation(x + residual) + return x + + +class LatentUpsampler(ModelMixin, ConfigMixin): + """ + Model to spatially upsample VAE latents. 
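+    Depending on the configuration, the latent is upsampled by a factor of 2 spatially
+    and/or temporally via a PixelShuffleND head.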
+ + Args: + in_channels (`int`): Number of channels in the input latent + mid_channels (`int`): Number of channels in the middle layers + num_blocks_per_stage (`int`): Number of ResBlocks to use in each stage (pre/post upsampling) + dims (`int`): Number of dimensions for convolutions (2 or 3) + spatial_upsample (`bool`): Whether to spatially upsample the latent + temporal_upsample (`bool`): Whether to temporally upsample the latent + """ + + def __init__( + self, + in_channels: int = 128, + mid_channels: int = 512, + num_blocks_per_stage: int = 4, + dims: int = 3, + spatial_upsample: bool = True, + temporal_upsample: bool = False, + ): + super().__init__() + + self.in_channels = in_channels + self.mid_channels = mid_channels + self.num_blocks_per_stage = num_blocks_per_stage + self.dims = dims + self.spatial_upsample = spatial_upsample + self.temporal_upsample = temporal_upsample + + Conv = nn.Conv2d if dims == 2 else nn.Conv3d + + self.initial_conv = Conv(in_channels, mid_channels, kernel_size=3, padding=1) + self.initial_norm = nn.GroupNorm(32, mid_channels) + self.initial_activation = nn.SiLU() + + self.res_blocks = nn.ModuleList( + [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)] + ) + + if spatial_upsample and temporal_upsample: + self.upsampler = nn.Sequential( + nn.Conv3d(mid_channels, 8 * mid_channels, kernel_size=3, padding=1), + PixelShuffleND(3), + ) + elif spatial_upsample: + self.upsampler = nn.Sequential( + nn.Conv2d(mid_channels, 4 * mid_channels, kernel_size=3, padding=1), + PixelShuffleND(2), + ) + elif temporal_upsample: + self.upsampler = nn.Sequential( + nn.Conv3d(mid_channels, 2 * mid_channels, kernel_size=3, padding=1), + PixelShuffleND(1), + ) + else: + raise ValueError( + "Either spatial_upsample or temporal_upsample must be True" + ) + + self.post_upsample_res_blocks = nn.ModuleList( + [ResBlock(mid_channels, dims=dims) for _ in range(num_blocks_per_stage)] + ) + + self.final_conv = Conv(mid_channels, in_channels, kernel_size=3, padding=1) + + def forward(self, latent: torch.Tensor) -> torch.Tensor: + b, c, f, h, w = latent.shape + + if self.dims == 2: + x = rearrange(latent, "b c f h w -> (b f) c h w") + x = self.initial_conv(x) + x = self.initial_norm(x) + x = self.initial_activation(x) + + for block in self.res_blocks: + x = block(x) + + x = self.upsampler(x) + + for block in self.post_upsample_res_blocks: + x = block(x) + + x = self.final_conv(x) + x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f) + else: + x = self.initial_conv(latent) + x = self.initial_norm(x) + x = self.initial_activation(x) + + for block in self.res_blocks: + x = block(x) + + if self.temporal_upsample: + x = self.upsampler(x) + x = x[:, :, 1:, :, :] + else: + x = rearrange(x, "b c f h w -> (b f) c h w") + x = self.upsampler(x) + x = rearrange(x, "(b f) c h w -> b c f h w", b=b, f=f) + + for block in self.post_upsample_res_blocks: + x = block(x) + + x = self.final_conv(x) + + return x + + @classmethod + def from_config(cls, config): + return cls( + in_channels=config.get("in_channels", 4), + mid_channels=config.get("mid_channels", 128), + num_blocks_per_stage=config.get("num_blocks_per_stage", 4), + dims=config.get("dims", 2), + spatial_upsample=config.get("spatial_upsample", True), + temporal_upsample=config.get("temporal_upsample", False), + ) + + def config(self): + return { + "_class_name": "LatentUpsampler", + "in_channels": self.in_channels, + "mid_channels": self.mid_channels, + "num_blocks_per_stage": self.num_blocks_per_stage, + "dims": self.dims, + 
"spatial_upsample": self.spatial_upsample, + "temporal_upsample": self.temporal_upsample, + } + + @classmethod + def from_pretrained( + cls, + pretrained_model_path: Optional[Union[str, os.PathLike]], + *args, + **kwargs, + ): + pretrained_model_path = Path(pretrained_model_path) + if pretrained_model_path.is_file() and str(pretrained_model_path).endswith( + ".safetensors" + ): + state_dict = {} + with safe_open(pretrained_model_path, framework="pt", device="cpu") as f: + metadata = f.metadata() + for k in f.keys(): + state_dict[k] = f.get_tensor(k) + config = json.loads(metadata["config"]) + with torch.device("meta"): + latent_upsampler = LatentUpsampler.from_config(config) + latent_upsampler.load_state_dict(state_dict, assign=True) + return latent_upsampler + + +if __name__ == "__main__": + latent_upsampler = LatentUpsampler(num_blocks_per_stage=4, dims=3) + print(latent_upsampler) + total_params = sum(p.numel() for p in latent_upsampler.parameters()) + print(f"Total number of parameters: {total_params:,}") + latent = torch.randn(1, 128, 9, 16, 16) + upsampled_latent = latent_upsampler(latent) + print(f"Upsampled latent shape: {upsampled_latent.shape}") diff --git a/ltx_video/models/autoencoders/pixel_norm.py b/ltx_video/models/autoencoders/pixel_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..9bc3ea60e8a6453e7e12a7fb5aca4de3958a2567 --- /dev/null +++ b/ltx_video/models/autoencoders/pixel_norm.py @@ -0,0 +1,12 @@ +import torch +from torch import nn + + +class PixelNorm(nn.Module): + def __init__(self, dim=1, eps=1e-8): + super(PixelNorm, self).__init__() + self.dim = dim + self.eps = eps + + def forward(self, x): + return x / torch.sqrt(torch.mean(x**2, dim=self.dim, keepdim=True) + self.eps) diff --git a/ltx_video/models/autoencoders/pixel_shuffle.py b/ltx_video/models/autoencoders/pixel_shuffle.py new file mode 100644 index 0000000000000000000000000000000000000000..4e79ae28483d5ad684ea68092bc955ef025722e6 --- /dev/null +++ b/ltx_video/models/autoencoders/pixel_shuffle.py @@ -0,0 +1,33 @@ +import torch.nn as nn +from einops import rearrange + + +class PixelShuffleND(nn.Module): + def __init__(self, dims, upscale_factors=(2, 2, 2)): + super().__init__() + assert dims in [1, 2, 3], "dims must be 1, 2, or 3" + self.dims = dims + self.upscale_factors = upscale_factors + + def forward(self, x): + if self.dims == 3: + return rearrange( + x, + "b (c p1 p2 p3) d h w -> b c (d p1) (h p2) (w p3)", + p1=self.upscale_factors[0], + p2=self.upscale_factors[1], + p3=self.upscale_factors[2], + ) + elif self.dims == 2: + return rearrange( + x, + "b (c p1 p2) h w -> b c (h p1) (w p2)", + p1=self.upscale_factors[0], + p2=self.upscale_factors[1], + ) + elif self.dims == 1: + return rearrange( + x, + "b (c p1) f h w -> b c (f p1) h w", + p1=self.upscale_factors[0], + ) diff --git a/ltx_video/models/autoencoders/vae.py b/ltx_video/models/autoencoders/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..5b22217c158eb26bca45b2b6a5e475e8a71b8181 --- /dev/null +++ b/ltx_video/models/autoencoders/vae.py @@ -0,0 +1,380 @@ +from typing import Optional, Union + +import torch +import inspect +import math +import torch.nn as nn +from diffusers import ConfigMixin, ModelMixin +from diffusers.models.autoencoders.vae import ( + DecoderOutput, + DiagonalGaussianDistribution, +) +from diffusers.models.modeling_outputs import AutoencoderKLOutput +from ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd + + +class AutoencoderKLWrapper(ModelMixin, ConfigMixin): 
+ """Variational Autoencoder (VAE) model with KL loss. + + VAE from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma and Max Welling. + This model is a wrapper around an encoder and a decoder, and it adds a KL loss term to the reconstruction loss. + + Args: + encoder (`nn.Module`): + Encoder module. + decoder (`nn.Module`): + Decoder module. + latent_channels (`int`, *optional*, defaults to 4): + Number of latent channels. + """ + + def __init__( + self, + encoder: nn.Module, + decoder: nn.Module, + latent_channels: int = 4, + dims: int = 2, + sample_size=512, + use_quant_conv: bool = True, + normalize_latent_channels: bool = False, + ): + super().__init__() + + # pass init params to Encoder + self.encoder = encoder + self.use_quant_conv = use_quant_conv + self.normalize_latent_channels = normalize_latent_channels + + # pass init params to Decoder + quant_dims = 2 if dims == 2 else 3 + self.decoder = decoder + if use_quant_conv: + self.quant_conv = make_conv_nd( + quant_dims, 2 * latent_channels, 2 * latent_channels, 1 + ) + self.post_quant_conv = make_conv_nd( + quant_dims, latent_channels, latent_channels, 1 + ) + else: + self.quant_conv = nn.Identity() + self.post_quant_conv = nn.Identity() + + if normalize_latent_channels: + if dims == 2: + self.latent_norm_out = nn.BatchNorm2d(latent_channels, affine=False) + else: + self.latent_norm_out = nn.BatchNorm3d(latent_channels, affine=False) + else: + self.latent_norm_out = nn.Identity() + self.use_z_tiling = False + self.use_hw_tiling = False + self.dims = dims + self.z_sample_size = 1 + + self.decoder_params = inspect.signature(self.decoder.forward).parameters + + # only relevant if vae tiling is enabled + self.set_tiling_params(sample_size=sample_size, overlap_factor=0.25) + + def set_tiling_params(self, sample_size: int = 512, overlap_factor: float = 0.25): + self.tile_sample_min_size = sample_size + num_blocks = len(self.encoder.down_blocks) + self.tile_latent_min_size = int(sample_size / (2 ** (num_blocks - 1))) + self.tile_overlap_factor = overlap_factor + + def enable_z_tiling(self, z_sample_size: int = 8): + r""" + Enable tiling during VAE decoding. + + When this option is enabled, the VAE will split the input tensor in tiles to compute decoding in several + steps. This is useful to save some memory and allow larger batch sizes. + """ + self.use_z_tiling = z_sample_size > 1 + self.z_sample_size = z_sample_size + assert ( + z_sample_size % 8 == 0 or z_sample_size == 1 + ), f"z_sample_size must be a multiple of 8 or 1. Got {z_sample_size}." + + def disable_z_tiling(self): + r""" + Disable tiling during VAE decoding. If `use_tiling` was previously invoked, this method will go back to computing + decoding in one step. + """ + self.use_z_tiling = False + + def enable_hw_tiling(self): + r""" + Enable tiling during VAE decoding along the height and width dimension. + """ + self.use_hw_tiling = True + + def disable_hw_tiling(self): + r""" + Disable tiling during VAE decoding along the height and width dimension. + """ + self.use_hw_tiling = False + + def _hw_tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True): + overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor) + row_limit = self.tile_latent_min_size - blend_extent + + # Split the image into 512x512 tiles and encode them separately. 
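+        # Consecutive tiles overlap (controlled by tile_overlap_factor); the overlapping
+        # regions are blended back together below via blend_v/blend_h to hide seams.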
+ rows = [] + for i in range(0, x.shape[3], overlap_size): + row = [] + for j in range(0, x.shape[4], overlap_size): + tile = x[ + :, + :, + :, + i : i + self.tile_sample_min_size, + j : j + self.tile_sample_min_size, + ] + tile = self.encoder(tile) + tile = self.quant_conv(tile) + row.append(tile) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=4)) + + moments = torch.cat(result_rows, dim=3) + return moments + + def blend_z( + self, a: torch.Tensor, b: torch.Tensor, blend_extent: int + ) -> torch.Tensor: + blend_extent = min(a.shape[2], b.shape[2], blend_extent) + for z in range(blend_extent): + b[:, :, z, :, :] = a[:, :, -blend_extent + z, :, :] * ( + 1 - z / blend_extent + ) + b[:, :, z, :, :] * (z / blend_extent) + return b + + def blend_v( + self, a: torch.Tensor, b: torch.Tensor, blend_extent: int + ) -> torch.Tensor: + blend_extent = min(a.shape[3], b.shape[3], blend_extent) + for y in range(blend_extent): + b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * ( + 1 - y / blend_extent + ) + b[:, :, :, y, :] * (y / blend_extent) + return b + + def blend_h( + self, a: torch.Tensor, b: torch.Tensor, blend_extent: int + ) -> torch.Tensor: + blend_extent = min(a.shape[4], b.shape[4], blend_extent) + for x in range(blend_extent): + b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * ( + 1 - x / blend_extent + ) + b[:, :, :, :, x] * (x / blend_extent) + return b + + def _hw_tiled_decode(self, z: torch.FloatTensor, target_shape): + overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor)) + blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor) + row_limit = self.tile_sample_min_size - blend_extent + tile_target_shape = ( + *target_shape[:3], + self.tile_sample_min_size, + self.tile_sample_min_size, + ) + # Split z into overlapping 64x64 tiles and decode them separately. + # The tiles have an overlap to avoid seams between tiles. 
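+        # Each latent tile is decoded to tile_target_shape; decoded tiles are then blended
+        # over their overlap (blend_v/blend_h) and cropped to row_limit before stitching.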
+ rows = [] + for i in range(0, z.shape[3], overlap_size): + row = [] + for j in range(0, z.shape[4], overlap_size): + tile = z[ + :, + :, + :, + i : i + self.tile_latent_min_size, + j : j + self.tile_latent_min_size, + ] + tile = self.post_quant_conv(tile) + decoded = self.decoder(tile, target_shape=tile_target_shape) + row.append(decoded) + rows.append(row) + result_rows = [] + for i, row in enumerate(rows): + result_row = [] + for j, tile in enumerate(row): + # blend the above tile and the left tile + # to the current tile and add the current tile to the result row + if i > 0: + tile = self.blend_v(rows[i - 1][j], tile, blend_extent) + if j > 0: + tile = self.blend_h(row[j - 1], tile, blend_extent) + result_row.append(tile[:, :, :, :row_limit, :row_limit]) + result_rows.append(torch.cat(result_row, dim=4)) + + dec = torch.cat(result_rows, dim=3) + return dec + + def encode( + self, z: torch.FloatTensor, return_dict: bool = True + ) -> Union[DecoderOutput, torch.FloatTensor]: + if self.use_z_tiling and z.shape[2] > self.z_sample_size > 1: + num_splits = z.shape[2] // self.z_sample_size + sizes = [self.z_sample_size] * num_splits + sizes = ( + sizes + [z.shape[2] - sum(sizes)] + if z.shape[2] - sum(sizes) > 0 + else sizes + ) + tiles = z.split(sizes, dim=2) + moments_tiles = [ + ( + self._hw_tiled_encode(z_tile, return_dict) + if self.use_hw_tiling + else self._encode(z_tile) + ) + for z_tile in tiles + ] + moments = torch.cat(moments_tiles, dim=2) + + else: + moments = ( + self._hw_tiled_encode(z, return_dict) + if self.use_hw_tiling + else self._encode(z) + ) + + posterior = DiagonalGaussianDistribution(moments) + if not return_dict: + return (posterior,) + + return AutoencoderKLOutput(latent_dist=posterior) + + def _normalize_latent_channels(self, z: torch.FloatTensor) -> torch.FloatTensor: + if isinstance(self.latent_norm_out, nn.BatchNorm3d): + _, c, _, _, _ = z.shape + z = torch.cat( + [ + self.latent_norm_out(z[:, : c // 2, :, :, :]), + z[:, c // 2 :, :, :, :], + ], + dim=1, + ) + elif isinstance(self.latent_norm_out, nn.BatchNorm2d): + raise NotImplementedError("BatchNorm2d not supported") + return z + + def _unnormalize_latent_channels(self, z: torch.FloatTensor) -> torch.FloatTensor: + if isinstance(self.latent_norm_out, nn.BatchNorm3d): + running_mean = self.latent_norm_out.running_mean.view(1, -1, 1, 1, 1) + running_var = self.latent_norm_out.running_var.view(1, -1, 1, 1, 1) + eps = self.latent_norm_out.eps + + z = z * torch.sqrt(running_var + eps) + running_mean + elif isinstance(self.latent_norm_out, nn.BatchNorm3d): + raise NotImplementedError("BatchNorm2d not supported") + return z + + def _encode(self, x: torch.FloatTensor) -> AutoencoderKLOutput: + h = self.encoder(x) + moments = self.quant_conv(h) + moments = self._normalize_latent_channels(moments) + return moments + + def _decode( + self, + z: torch.FloatTensor, + target_shape=None, + timestep: Optional[torch.Tensor] = None, + ) -> Union[DecoderOutput, torch.FloatTensor]: + z = self._unnormalize_latent_channels(z) + z = self.post_quant_conv(z) + if "timestep" in self.decoder_params: + dec = self.decoder(z, target_shape=target_shape, timestep=timestep) + else: + dec = self.decoder(z, target_shape=target_shape) + return dec + + def decode( + self, + z: torch.FloatTensor, + return_dict: bool = True, + target_shape=None, + timestep: Optional[torch.Tensor] = None, + ) -> Union[DecoderOutput, torch.FloatTensor]: + assert target_shape is not None, "target_shape must be provided for decoding" + if self.use_z_tiling and 
z.shape[2] > self.z_sample_size > 1: + reduction_factor = int( + self.encoder.patch_size_t + * 2 + ** ( + len(self.encoder.down_blocks) + - 1 + - math.sqrt(self.encoder.patch_size) + ) + ) + split_size = self.z_sample_size // reduction_factor + num_splits = z.shape[2] // split_size + + # copy target shape, and divide frame dimension (=2) by the context size + target_shape_split = list(target_shape) + target_shape_split[2] = target_shape[2] // num_splits + + decoded_tiles = [ + ( + self._hw_tiled_decode(z_tile, target_shape_split) + if self.use_hw_tiling + else self._decode(z_tile, target_shape=target_shape_split) + ) + for z_tile in torch.tensor_split(z, num_splits, dim=2) + ] + decoded = torch.cat(decoded_tiles, dim=2) + else: + decoded = ( + self._hw_tiled_decode(z, target_shape) + if self.use_hw_tiling + else self._decode(z, target_shape=target_shape, timestep=timestep) + ) + + if not return_dict: + return (decoded,) + + return DecoderOutput(sample=decoded) + + def forward( + self, + sample: torch.FloatTensor, + sample_posterior: bool = False, + return_dict: bool = True, + generator: Optional[torch.Generator] = None, + ) -> Union[DecoderOutput, torch.FloatTensor]: + r""" + Args: + sample (`torch.FloatTensor`): Input sample. + sample_posterior (`bool`, *optional*, defaults to `False`): + Whether to sample from the posterior. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`DecoderOutput`] instead of a plain tuple. + generator (`torch.Generator`, *optional*): + Generator used to sample from the posterior. + """ + x = sample + posterior = self.encode(x).latent_dist + if sample_posterior: + z = posterior.sample(generator=generator) + else: + z = posterior.mode() + dec = self.decode(z, target_shape=sample.shape).sample + + if not return_dict: + return (dec,) + + return DecoderOutput(sample=dec) diff --git a/ltx_video/models/autoencoders/vae_encode.py b/ltx_video/models/autoencoders/vae_encode.py new file mode 100644 index 0000000000000000000000000000000000000000..bfc97f6720ecbef51711cb47cd759532d8813128 --- /dev/null +++ b/ltx_video/models/autoencoders/vae_encode.py @@ -0,0 +1,247 @@ +from typing import Tuple +import torch +from diffusers import AutoencoderKL +from einops import rearrange +from torch import Tensor + + +from ltx_video.models.autoencoders.causal_video_autoencoder import ( + CausalVideoAutoencoder, +) +from ltx_video.models.autoencoders.video_autoencoder import ( + Downsample3D, + VideoAutoencoder, +) + +try: + import torch_xla.core.xla_model as xm +except ImportError: + xm = None + + +def vae_encode( + media_items: Tensor, + vae: AutoencoderKL, + split_size: int = 1, + vae_per_channel_normalize=False, +) -> Tensor: + """ + Encodes media items (images or videos) into latent representations using a specified VAE model. + The function supports processing batches of images or video frames and can handle the processing + in smaller sub-batches if needed. + + Args: + media_items (Tensor): A torch Tensor containing the media items to encode. The expected + shape is (batch_size, channels, height, width) for images or (batch_size, channels, + frames, height, width) for videos. + vae (AutoencoderKL): An instance of the `AutoencoderKL` class from the `diffusers` library, + pre-configured and loaded with the appropriate model weights. + split_size (int, optional): The number of sub-batches to split the input batch into for encoding. + If set to more than 1, the input media items are processed in smaller batches according to + this value. 
Defaults to 1, which processes all items in a single batch. + + Returns: + Tensor: A torch Tensor of the encoded latent representations. The shape of the tensor is adjusted + to match the input shape, scaled by the model's configuration. + + Examples: + >>> import torch + >>> from diffusers import AutoencoderKL + >>> vae = AutoencoderKL.from_pretrained('your-model-name') + >>> images = torch.rand(10, 3, 8 256, 256) # Example tensor with 10 videos of 8 frames. + >>> latents = vae_encode(images, vae) + >>> print(latents.shape) # Output shape will depend on the model's latent configuration. + + Note: + In case of a video, the function encodes the media item frame-by frame. + """ + is_video_shaped = media_items.dim() == 5 + batch_size, channels = media_items.shape[0:2] + + if channels != 3: + raise ValueError(f"Expects tensors with 3 channels, got {channels}.") + + if is_video_shaped and not isinstance( + vae, (VideoAutoencoder, CausalVideoAutoencoder) + ): + media_items = rearrange(media_items, "b c n h w -> (b n) c h w") + if split_size > 1: + if len(media_items) % split_size != 0: + raise ValueError( + "Error: The batch size must be divisible by 'train.vae_bs_split" + ) + encode_bs = len(media_items) // split_size + # latents = [vae.encode(image_batch).latent_dist.sample() for image_batch in media_items.split(encode_bs)] + latents = [] + if media_items.device.type == "xla": + xm.mark_step() + for image_batch in media_items.split(encode_bs): + latents.append(vae.encode(image_batch).latent_dist.sample()) + if media_items.device.type == "xla": + xm.mark_step() + latents = torch.cat(latents, dim=0) + else: + latents = vae.encode(media_items).latent_dist.sample() + + latents = normalize_latents(latents, vae, vae_per_channel_normalize) + if is_video_shaped and not isinstance( + vae, (VideoAutoencoder, CausalVideoAutoencoder) + ): + latents = rearrange(latents, "(b n) c h w -> b c n h w", b=batch_size) + return latents + + +def vae_decode( + latents: Tensor, + vae: AutoencoderKL, + is_video: bool = True, + split_size: int = 1, + vae_per_channel_normalize=False, + timestep=None, +) -> Tensor: + is_video_shaped = latents.dim() == 5 + batch_size = latents.shape[0] + + if is_video_shaped and not isinstance( + vae, (VideoAutoencoder, CausalVideoAutoencoder) + ): + latents = rearrange(latents, "b c n h w -> (b n) c h w") + if split_size > 1: + if len(latents) % split_size != 0: + raise ValueError( + "Error: The batch size must be divisible by 'train.vae_bs_split" + ) + encode_bs = len(latents) // split_size + image_batch = [ + _run_decoder( + latent_batch, vae, is_video, vae_per_channel_normalize, timestep + ) + for latent_batch in latents.split(encode_bs) + ] + images = torch.cat(image_batch, dim=0) + else: + images = _run_decoder( + latents, vae, is_video, vae_per_channel_normalize, timestep + ) + + if is_video_shaped and not isinstance( + vae, (VideoAutoencoder, CausalVideoAutoencoder) + ): + images = rearrange(images, "(b n) c h w -> b c n h w", b=batch_size) + return images + + +def _run_decoder( + latents: Tensor, + vae: AutoencoderKL, + is_video: bool, + vae_per_channel_normalize=False, + timestep=None, +) -> Tensor: + if isinstance(vae, (VideoAutoencoder, CausalVideoAutoencoder)): + *_, fl, hl, wl = latents.shape + temporal_scale, spatial_scale, _ = get_vae_size_scale_factor(vae) + latents = latents.to(vae.dtype) + vae_decode_kwargs = {} + if timestep is not None: + vae_decode_kwargs["timestep"] = timestep + image = vae.decode( + un_normalize_latents(latents, vae, vae_per_channel_normalize), + 
return_dict=False, + target_shape=( + 1, + 3, + fl * temporal_scale if is_video else 1, + hl * spatial_scale, + wl * spatial_scale, + ), + **vae_decode_kwargs, + )[0] + else: + image = vae.decode( + un_normalize_latents(latents, vae, vae_per_channel_normalize), + return_dict=False, + )[0] + return image + + +def get_vae_size_scale_factor(vae: AutoencoderKL) -> float: + if isinstance(vae, CausalVideoAutoencoder): + spatial = vae.spatial_downscale_factor + temporal = vae.temporal_downscale_factor + else: + down_blocks = len( + [ + block + for block in vae.encoder.down_blocks + if isinstance(block.downsample, Downsample3D) + ] + ) + spatial = vae.config.patch_size * 2**down_blocks + temporal = ( + vae.config.patch_size_t * 2**down_blocks + if isinstance(vae, VideoAutoencoder) + else 1 + ) + + return (temporal, spatial, spatial) + + +def latent_to_pixel_coords( + latent_coords: Tensor, vae: AutoencoderKL, causal_fix: bool = False +) -> Tensor: + """ + Converts latent coordinates to pixel coordinates by scaling them according to the VAE's + configuration. + + Args: + latent_coords (Tensor): A tensor of shape [batch_size, 3, num_latents] + containing the latent corner coordinates of each token. + vae (AutoencoderKL): The VAE model + causal_fix (bool): Whether to take into account the different temporal scale + of the first frame. Default = False for backwards compatibility. + Returns: + Tensor: A tensor of pixel coordinates corresponding to the input latent coordinates. + """ + + scale_factors = get_vae_size_scale_factor(vae) + causal_fix = isinstance(vae, CausalVideoAutoencoder) and causal_fix + pixel_coords = latent_to_pixel_coords_from_factors( + latent_coords, scale_factors, causal_fix + ) + return pixel_coords + + +def latent_to_pixel_coords_from_factors( + latent_coords: Tensor, scale_factors: Tuple, causal_fix: bool = False +) -> Tensor: + pixel_coords = ( + latent_coords + * torch.tensor(scale_factors, device=latent_coords.device)[None, :, None] + ) + if causal_fix: + # Fix temporal scale for first frame to 1 due to causality + pixel_coords[:, 0] = (pixel_coords[:, 0] + 1 - scale_factors[0]).clamp(min=0) + return pixel_coords + + +def normalize_latents( + latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False +) -> Tensor: + return ( + (latents - vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)) + / vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1) + if vae_per_channel_normalize + else latents * vae.config.scaling_factor + ) + + +def un_normalize_latents( + latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False +) -> Tensor: + return ( + latents * vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1) + + vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1) + if vae_per_channel_normalize + else latents / vae.config.scaling_factor + ) diff --git a/ltx_video/models/autoencoders/video_autoencoder.py b/ltx_video/models/autoencoders/video_autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..3c7926c1d3afb8188221b2e569aaaf89f7271bce --- /dev/null +++ b/ltx_video/models/autoencoders/video_autoencoder.py @@ -0,0 +1,1045 @@ +import json +import os +from functools import partial +from types import SimpleNamespace +from typing import Any, Mapping, Optional, Tuple, Union + +import torch +from einops import rearrange +from torch import nn +from torch.nn import functional + +from diffusers.utils import logging + +from ltx_video.utils.torch_utils import Identity +from 
ltx_video.models.autoencoders.conv_nd_factory import make_conv_nd, make_linear_nd +from ltx_video.models.autoencoders.pixel_norm import PixelNorm +from ltx_video.models.autoencoders.vae import AutoencoderKLWrapper + +logger = logging.get_logger(__name__) + + +class VideoAutoencoder(AutoencoderKLWrapper): + @classmethod + def from_pretrained( + cls, + pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], + *args, + **kwargs, + ): + config_local_path = pretrained_model_name_or_path / "config.json" + config = cls.load_config(config_local_path, **kwargs) + video_vae = cls.from_config(config) + video_vae.to(kwargs["torch_dtype"]) + + model_local_path = pretrained_model_name_or_path / "autoencoder.pth" + ckpt_state_dict = torch.load(model_local_path) + video_vae.load_state_dict(ckpt_state_dict) + + statistics_local_path = ( + pretrained_model_name_or_path / "per_channel_statistics.json" + ) + if statistics_local_path.exists(): + with open(statistics_local_path, "r") as file: + data = json.load(file) + transposed_data = list(zip(*data["data"])) + data_dict = { + col: torch.tensor(vals) + for col, vals in zip(data["columns"], transposed_data) + } + video_vae.register_buffer("std_of_means", data_dict["std-of-means"]) + video_vae.register_buffer( + "mean_of_means", + data_dict.get( + "mean-of-means", torch.zeros_like(data_dict["std-of-means"]) + ), + ) + + return video_vae + + @staticmethod + def from_config(config): + assert ( + config["_class_name"] == "VideoAutoencoder" + ), "config must have _class_name=VideoAutoencoder" + if isinstance(config["dims"], list): + config["dims"] = tuple(config["dims"]) + + assert config["dims"] in [2, 3, (2, 1)], "dims must be 2, 3 or (2, 1)" + + double_z = config.get("double_z", True) + latent_log_var = config.get( + "latent_log_var", "per_channel" if double_z else "none" + ) + use_quant_conv = config.get("use_quant_conv", True) + + if use_quant_conv and latent_log_var == "uniform": + raise ValueError("uniform latent_log_var requires use_quant_conv=False") + + encoder = Encoder( + dims=config["dims"], + in_channels=config.get("in_channels", 3), + out_channels=config["latent_channels"], + block_out_channels=config["block_out_channels"], + patch_size=config.get("patch_size", 1), + latent_log_var=latent_log_var, + norm_layer=config.get("norm_layer", "group_norm"), + patch_size_t=config.get("patch_size_t", config.get("patch_size", 1)), + add_channel_padding=config.get("add_channel_padding", False), + ) + + decoder = Decoder( + dims=config["dims"], + in_channels=config["latent_channels"], + out_channels=config.get("out_channels", 3), + block_out_channels=config["block_out_channels"], + patch_size=config.get("patch_size", 1), + norm_layer=config.get("norm_layer", "group_norm"), + patch_size_t=config.get("patch_size_t", config.get("patch_size", 1)), + add_channel_padding=config.get("add_channel_padding", False), + ) + + dims = config["dims"] + return VideoAutoencoder( + encoder=encoder, + decoder=decoder, + latent_channels=config["latent_channels"], + dims=dims, + use_quant_conv=use_quant_conv, + ) + + @property + def config(self): + return SimpleNamespace( + _class_name="VideoAutoencoder", + dims=self.dims, + in_channels=self.encoder.conv_in.in_channels + // (self.encoder.patch_size_t * self.encoder.patch_size**2), + out_channels=self.decoder.conv_out.out_channels + // (self.decoder.patch_size_t * self.decoder.patch_size**2), + latent_channels=self.decoder.conv_in.in_channels, + block_out_channels=[ + 
self.encoder.down_blocks[i].res_blocks[-1].conv1.out_channels + for i in range(len(self.encoder.down_blocks)) + ], + scaling_factor=1.0, + norm_layer=self.encoder.norm_layer, + patch_size=self.encoder.patch_size, + latent_log_var=self.encoder.latent_log_var, + use_quant_conv=self.use_quant_conv, + patch_size_t=self.encoder.patch_size_t, + add_channel_padding=self.encoder.add_channel_padding, + ) + + @property + def is_video_supported(self): + """ + Check if the model supports video inputs of shape (B, C, F, H, W). Otherwise, the model only supports 2D images. + """ + return self.dims != 2 + + @property + def downscale_factor(self): + return self.encoder.downsample_factor + + def to_json_string(self) -> str: + import json + + return json.dumps(self.config.__dict__) + + def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True): + model_keys = set(name for name, _ in self.named_parameters()) + + key_mapping = { + ".resnets.": ".res_blocks.", + "downsamplers.0": "downsample", + "upsamplers.0": "upsample", + } + + converted_state_dict = {} + for key, value in state_dict.items(): + for k, v in key_mapping.items(): + key = key.replace(k, v) + + if "norm" in key and key not in model_keys: + logger.info( + f"Removing key {key} from state_dict as it is not present in the model" + ) + continue + + converted_state_dict[key] = value + + super().load_state_dict(converted_state_dict, strict=strict) + + def last_layer(self): + if hasattr(self.decoder, "conv_out"): + if isinstance(self.decoder.conv_out, nn.Sequential): + last_layer = self.decoder.conv_out[-1] + else: + last_layer = self.decoder.conv_out + else: + last_layer = self.decoder.layers[-1] + return last_layer + + +class Encoder(nn.Module): + r""" + The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): + The number of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + patch_size (`int`, *optional*, defaults to 1): + The patch size to use. Should be a power of 2. + norm_layer (`str`, *optional*, defaults to `group_norm`): + The normalization layer to use. Can be either `group_norm` or `pixel_norm`. + latent_log_var (`str`, *optional*, defaults to `per_channel`): + The number of channels for the log variance. Can be either `per_channel`, `uniform`, or `none`. + """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]] = 3, + in_channels: int = 3, + out_channels: int = 3, + block_out_channels: Tuple[int, ...] 
= (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + patch_size: Union[int, Tuple[int]] = 1, + norm_layer: str = "group_norm", # group_norm, pixel_norm + latent_log_var: str = "per_channel", + patch_size_t: Optional[int] = None, + add_channel_padding: Optional[bool] = False, + ): + super().__init__() + self.patch_size = patch_size + self.patch_size_t = patch_size_t if patch_size_t is not None else patch_size + self.add_channel_padding = add_channel_padding + self.layers_per_block = layers_per_block + self.norm_layer = norm_layer + self.latent_channels = out_channels + self.latent_log_var = latent_log_var + if add_channel_padding: + in_channels = in_channels * self.patch_size**3 + else: + in_channels = in_channels * self.patch_size_t * self.patch_size**2 + self.in_channels = in_channels + output_channel = block_out_channels[0] + + self.conv_in = make_conv_nd( + dims=dims, + in_channels=in_channels, + out_channels=output_channel, + kernel_size=3, + stride=1, + padding=1, + ) + + self.down_blocks = nn.ModuleList([]) + + for i in range(len(block_out_channels)): + input_channel = output_channel + output_channel = block_out_channels[i] + is_final_block = i == len(block_out_channels) - 1 + + down_block = DownEncoderBlock3D( + dims=dims, + in_channels=input_channel, + out_channels=output_channel, + num_layers=self.layers_per_block, + add_downsample=not is_final_block and 2**i >= patch_size, + resnet_eps=1e-6, + downsample_padding=0, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + ) + self.down_blocks.append(down_block) + + self.mid_block = UNetMidBlock3D( + dims=dims, + in_channels=block_out_channels[-1], + num_layers=self.layers_per_block, + resnet_eps=1e-6, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + ) + + # out + if norm_layer == "group_norm": + self.conv_norm_out = nn.GroupNorm( + num_channels=block_out_channels[-1], + num_groups=norm_num_groups, + eps=1e-6, + ) + elif norm_layer == "pixel_norm": + self.conv_norm_out = PixelNorm() + self.conv_act = nn.SiLU() + + conv_out_channels = out_channels + if latent_log_var == "per_channel": + conv_out_channels *= 2 + elif latent_log_var == "uniform": + conv_out_channels += 1 + elif latent_log_var != "none": + raise ValueError(f"Invalid latent_log_var: {latent_log_var}") + self.conv_out = make_conv_nd( + dims, block_out_channels[-1], conv_out_channels, 3, padding=1 + ) + + self.gradient_checkpointing = False + + @property + def downscale_factor(self): + return ( + 2 + ** len( + [ + block + for block in self.down_blocks + if isinstance(block.downsample, Downsample3D) + ] + ) + * self.patch_size + ) + + def forward( + self, sample: torch.FloatTensor, return_features=False + ) -> torch.FloatTensor: + r"""The forward method of the `Encoder` class.""" + + downsample_in_time = sample.shape[2] != 1 + + # patchify + patch_size_t = self.patch_size_t if downsample_in_time else 1 + sample = patchify( + sample, + patch_size_hw=self.patch_size, + patch_size_t=patch_size_t, + add_channel_padding=self.add_channel_padding, + ) + + sample = self.conv_in(sample) + + checkpoint_fn = ( + partial(torch.utils.checkpoint.checkpoint, use_reentrant=False) + if self.gradient_checkpointing and self.training + else lambda x: x + ) + + if return_features: + features = [] + for down_block in self.down_blocks: + sample = checkpoint_fn(down_block)( + sample, downsample_in_time=downsample_in_time + ) + if return_features: + features.append(sample) + + sample = checkpoint_fn(self.mid_block)(sample) + + # post-process + sample = 
self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + if self.latent_log_var == "uniform": + last_channel = sample[:, -1:, ...] + num_dims = sample.dim() + + if num_dims == 4: + # For shape (B, C, H, W) + repeated_last_channel = last_channel.repeat( + 1, sample.shape[1] - 2, 1, 1 + ) + sample = torch.cat([sample, repeated_last_channel], dim=1) + elif num_dims == 5: + # For shape (B, C, F, H, W) + repeated_last_channel = last_channel.repeat( + 1, sample.shape[1] - 2, 1, 1, 1 + ) + sample = torch.cat([sample, repeated_last_channel], dim=1) + else: + raise ValueError(f"Invalid input shape: {sample.shape}") + + if return_features: + features.append(sample[:, : self.latent_channels, ...]) + return sample, features + return sample + + +class Decoder(nn.Module): + r""" + The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample. + + Args: + in_channels (`int`, *optional*, defaults to 3): + The number of input channels. + out_channels (`int`, *optional*, defaults to 3): + The number of output channels. + block_out_channels (`Tuple[int, ...]`, *optional*, defaults to `(64,)`): + The number of output channels for each block. + layers_per_block (`int`, *optional*, defaults to 2): + The number of layers per block. + norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups for normalization. + patch_size (`int`, *optional*, defaults to 1): + The patch size to use. Should be a power of 2. + norm_layer (`str`, *optional*, defaults to `group_norm`): + The normalization layer to use. Can be either `group_norm` or `pixel_norm`. + """ + + def __init__( + self, + dims, + in_channels: int = 3, + out_channels: int = 3, + block_out_channels: Tuple[int, ...] = (64,), + layers_per_block: int = 2, + norm_num_groups: int = 32, + patch_size: int = 1, + norm_layer: str = "group_norm", + patch_size_t: Optional[int] = None, + add_channel_padding: Optional[bool] = False, + ): + super().__init__() + self.patch_size = patch_size + self.patch_size_t = patch_size_t if patch_size_t is not None else patch_size + self.add_channel_padding = add_channel_padding + self.layers_per_block = layers_per_block + if add_channel_padding: + out_channels = out_channels * self.patch_size**3 + else: + out_channels = out_channels * self.patch_size_t * self.patch_size**2 + self.out_channels = out_channels + + self.conv_in = make_conv_nd( + dims, + in_channels, + block_out_channels[-1], + kernel_size=3, + stride=1, + padding=1, + ) + + self.mid_block = None + self.up_blocks = nn.ModuleList([]) + + self.mid_block = UNetMidBlock3D( + dims=dims, + in_channels=block_out_channels[-1], + num_layers=self.layers_per_block, + resnet_eps=1e-6, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + ) + + reversed_block_out_channels = list(reversed(block_out_channels)) + output_channel = reversed_block_out_channels[0] + for i in range(len(reversed_block_out_channels)): + prev_output_channel = output_channel + output_channel = reversed_block_out_channels[i] + + is_final_block = i == len(block_out_channels) - 1 + + up_block = UpDecoderBlock3D( + dims=dims, + num_layers=self.layers_per_block + 1, + in_channels=prev_output_channel, + out_channels=output_channel, + add_upsample=not is_final_block + and 2 ** (len(block_out_channels) - i - 1) > patch_size, + resnet_eps=1e-6, + resnet_groups=norm_num_groups, + norm_layer=norm_layer, + ) + self.up_blocks.append(up_block) + + if norm_layer == "group_norm": + self.conv_norm_out = nn.GroupNorm( + 
num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6 + ) + elif norm_layer == "pixel_norm": + self.conv_norm_out = PixelNorm() + + self.conv_act = nn.SiLU() + self.conv_out = make_conv_nd( + dims, block_out_channels[0], out_channels, 3, padding=1 + ) + + self.gradient_checkpointing = False + + def forward(self, sample: torch.FloatTensor, target_shape) -> torch.FloatTensor: + r"""The forward method of the `Decoder` class.""" + assert target_shape is not None, "target_shape must be provided" + upsample_in_time = sample.shape[2] < target_shape[2] + + sample = self.conv_in(sample) + + upscale_dtype = next(iter(self.up_blocks.parameters())).dtype + + checkpoint_fn = ( + partial(torch.utils.checkpoint.checkpoint, use_reentrant=False) + if self.gradient_checkpointing and self.training + else lambda x: x + ) + + sample = checkpoint_fn(self.mid_block)(sample) + sample = sample.to(upscale_dtype) + + for up_block in self.up_blocks: + sample = checkpoint_fn(up_block)(sample, upsample_in_time=upsample_in_time) + + # post-process + sample = self.conv_norm_out(sample) + sample = self.conv_act(sample) + sample = self.conv_out(sample) + + # un-patchify + patch_size_t = self.patch_size_t if upsample_in_time else 1 + sample = unpatchify( + sample, + patch_size_hw=self.patch_size, + patch_size_t=patch_size_t, + add_channel_padding=self.add_channel_padding, + ) + + return sample + + +class DownEncoderBlock3D(nn.Module): + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + out_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_groups: int = 32, + add_downsample: bool = True, + downsample_padding: int = 1, + norm_layer: str = "group_norm", + ): + super().__init__() + res_blocks = [] + + for i in range(num_layers): + in_channels = in_channels if i == 0 else out_channels + res_blocks.append( + ResnetBlock3D( + dims=dims, + in_channels=in_channels, + out_channels=out_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + norm_layer=norm_layer, + ) + ) + + self.res_blocks = nn.ModuleList(res_blocks) + + if add_downsample: + self.downsample = Downsample3D( + dims, + out_channels, + out_channels=out_channels, + padding=downsample_padding, + ) + else: + self.downsample = Identity() + + def forward( + self, hidden_states: torch.FloatTensor, downsample_in_time + ) -> torch.FloatTensor: + for resnet in self.res_blocks: + hidden_states = resnet(hidden_states) + + hidden_states = self.downsample( + hidden_states, downsample_in_time=downsample_in_time + ) + + return hidden_states + + +class UNetMidBlock3D(nn.Module): + """ + A 3D UNet mid-block [`UNetMidBlock3D`] with multiple residual blocks. + + Args: + in_channels (`int`): The number of input channels. + dropout (`float`, *optional*, defaults to 0.0): The dropout rate. + num_layers (`int`, *optional*, defaults to 1): The number of residual blocks. + resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks. + resnet_groups (`int`, *optional*, defaults to 32): + The number of groups to use in the group normalization layers of the resnet blocks. + + Returns: + `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size, + in_channels, height, width)`. 
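+        For 5D video inputs the layout is preserved, i.e. the output has shape
+        `(batch_size, in_channels, frames, height, width)`.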
+ + """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_groups: int = 32, + norm_layer: str = "group_norm", + ): + super().__init__() + resnet_groups = ( + resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) + ) + + self.res_blocks = nn.ModuleList( + [ + ResnetBlock3D( + dims=dims, + in_channels=in_channels, + out_channels=in_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + norm_layer=norm_layer, + ) + for _ in range(num_layers) + ] + ) + + def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor: + for resnet in self.res_blocks: + hidden_states = resnet(hidden_states) + + return hidden_states + + +class UpDecoderBlock3D(nn.Module): + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + out_channels: int, + resolution_idx: Optional[int] = None, + dropout: float = 0.0, + num_layers: int = 1, + resnet_eps: float = 1e-6, + resnet_groups: int = 32, + add_upsample: bool = True, + norm_layer: str = "group_norm", + ): + super().__init__() + res_blocks = [] + + for i in range(num_layers): + input_channels = in_channels if i == 0 else out_channels + + res_blocks.append( + ResnetBlock3D( + dims=dims, + in_channels=input_channels, + out_channels=out_channels, + eps=resnet_eps, + groups=resnet_groups, + dropout=dropout, + norm_layer=norm_layer, + ) + ) + + self.res_blocks = nn.ModuleList(res_blocks) + + if add_upsample: + self.upsample = Upsample3D( + dims=dims, channels=out_channels, out_channels=out_channels + ) + else: + self.upsample = Identity() + + self.resolution_idx = resolution_idx + + def forward( + self, hidden_states: torch.FloatTensor, upsample_in_time=True + ) -> torch.FloatTensor: + for resnet in self.res_blocks: + hidden_states = resnet(hidden_states) + + hidden_states = self.upsample(hidden_states, upsample_in_time=upsample_in_time) + + return hidden_states + + +class ResnetBlock3D(nn.Module): + r""" + A Resnet block. + + Parameters: + in_channels (`int`): The number of channels in the input. + out_channels (`int`, *optional*, default to be `None`): + The number of output channels for the first conv layer. If None, same as `in_channels`. + dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use. + groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer. + eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization. 
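+        norm_layer (`str`, *optional*, defaults to `"group_norm"`):
+            The normalization layer to use, either `group_norm` or `pixel_norm`.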
+ """ + + def __init__( + self, + dims: Union[int, Tuple[int, int]], + in_channels: int, + out_channels: Optional[int] = None, + conv_shortcut: bool = False, + dropout: float = 0.0, + groups: int = 32, + eps: float = 1e-6, + norm_layer: str = "group_norm", + ): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + if norm_layer == "group_norm": + self.norm1 = torch.nn.GroupNorm( + num_groups=groups, num_channels=in_channels, eps=eps, affine=True + ) + elif norm_layer == "pixel_norm": + self.norm1 = PixelNorm() + + self.non_linearity = nn.SiLU() + + self.conv1 = make_conv_nd( + dims, in_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + + if norm_layer == "group_norm": + self.norm2 = torch.nn.GroupNorm( + num_groups=groups, num_channels=out_channels, eps=eps, affine=True + ) + elif norm_layer == "pixel_norm": + self.norm2 = PixelNorm() + + self.dropout = torch.nn.Dropout(dropout) + + self.conv2 = make_conv_nd( + dims, out_channels, out_channels, kernel_size=3, stride=1, padding=1 + ) + + self.conv_shortcut = ( + make_linear_nd( + dims=dims, in_channels=in_channels, out_channels=out_channels + ) + if in_channels != out_channels + else nn.Identity() + ) + + def forward( + self, + input_tensor: torch.FloatTensor, + ) -> torch.FloatTensor: + hidden_states = input_tensor + + hidden_states = self.norm1(hidden_states) + + hidden_states = self.non_linearity(hidden_states) + + hidden_states = self.conv1(hidden_states) + + hidden_states = self.norm2(hidden_states) + + hidden_states = self.non_linearity(hidden_states) + + hidden_states = self.dropout(hidden_states) + + hidden_states = self.conv2(hidden_states) + + input_tensor = self.conv_shortcut(input_tensor) + + output_tensor = input_tensor + hidden_states + + return output_tensor + + +class Downsample3D(nn.Module): + def __init__( + self, + dims, + in_channels: int, + out_channels: int, + kernel_size: int = 3, + padding: int = 1, + ): + super().__init__() + stride: int = 2 + self.padding = padding + self.in_channels = in_channels + self.dims = dims + self.conv = make_conv_nd( + dims=dims, + in_channels=in_channels, + out_channels=out_channels, + kernel_size=kernel_size, + stride=stride, + padding=padding, + ) + + def forward(self, x, downsample_in_time=True): + conv = self.conv + if self.padding == 0: + if self.dims == 2: + padding = (0, 1, 0, 1) + else: + padding = (0, 1, 0, 1, 0, 1 if downsample_in_time else 0) + + x = functional.pad(x, padding, mode="constant", value=0) + + if self.dims == (2, 1) and not downsample_in_time: + return conv(x, skip_time_conv=True) + + return conv(x) + + +class Upsample3D(nn.Module): + """ + An upsampling layer for 3D tensors of shape (B, C, D, H, W). + + :param channels: channels in the inputs and outputs. 
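+    :param out_channels: number of output channels (falls back to `channels` when not given).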
+ """ + + def __init__(self, dims, channels, out_channels=None): + super().__init__() + self.dims = dims + self.channels = channels + self.out_channels = out_channels or channels + self.conv = make_conv_nd( + dims, channels, out_channels, kernel_size=3, padding=1, bias=True + ) + + def forward(self, x, upsample_in_time): + if self.dims == 2: + x = functional.interpolate( + x, (x.shape[2] * 2, x.shape[3] * 2), mode="nearest" + ) + else: + time_scale_factor = 2 if upsample_in_time else 1 + # print("before:", x.shape) + b, c, d, h, w = x.shape + x = rearrange(x, "b c d h w -> (b d) c h w") + # height and width interpolate + x = functional.interpolate( + x, (x.shape[2] * 2, x.shape[3] * 2), mode="nearest" + ) + _, _, h, w = x.shape + + if not upsample_in_time and self.dims == (2, 1): + x = rearrange(x, "(b d) c h w -> b c d h w ", b=b, h=h, w=w) + return self.conv(x, skip_time_conv=True) + + # Second ** upsampling ** which is essentially treated as a 1D convolution across the 'd' dimension + x = rearrange(x, "(b d) c h w -> (b h w) c 1 d", b=b) + + # (b h w) c 1 d + new_d = x.shape[-1] * time_scale_factor + x = functional.interpolate(x, (1, new_d), mode="nearest") + # (b h w) c 1 new_d + x = rearrange( + x, "(b h w) c 1 new_d -> b c new_d h w", b=b, h=h, w=w, new_d=new_d + ) + # b c d h w + + # x = functional.interpolate( + # x, (x.shape[2] * time_scale_factor, x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + # ) + # print("after:", x.shape) + + return self.conv(x) + + +def patchify(x, patch_size_hw, patch_size_t=1, add_channel_padding=False): + if patch_size_hw == 1 and patch_size_t == 1: + return x + if x.dim() == 4: + x = rearrange( + x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size_hw, r=patch_size_hw + ) + elif x.dim() == 5: + x = rearrange( + x, + "b c (f p) (h q) (w r) -> b (c p r q) f h w", + p=patch_size_t, + q=patch_size_hw, + r=patch_size_hw, + ) + else: + raise ValueError(f"Invalid input shape: {x.shape}") + + if ( + (x.dim() == 5) + and (patch_size_hw > patch_size_t) + and (patch_size_t > 1 or add_channel_padding) + ): + channels_to_pad = x.shape[1] * (patch_size_hw // patch_size_t) - x.shape[1] + padding_zeros = torch.zeros( + x.shape[0], + channels_to_pad, + x.shape[2], + x.shape[3], + x.shape[4], + device=x.device, + dtype=x.dtype, + ) + x = torch.cat([padding_zeros, x], dim=1) + + return x + + +def unpatchify(x, patch_size_hw, patch_size_t=1, add_channel_padding=False): + if patch_size_hw == 1 and patch_size_t == 1: + return x + + if ( + (x.dim() == 5) + and (patch_size_hw > patch_size_t) + and (patch_size_t > 1 or add_channel_padding) + ): + channels_to_keep = int(x.shape[1] * (patch_size_t / patch_size_hw)) + x = x[:, :channels_to_keep, :, :, :] + + if x.dim() == 4: + x = rearrange( + x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size_hw, r=patch_size_hw + ) + elif x.dim() == 5: + x = rearrange( + x, + "b (c p r q) f h w -> b c (f p) (h q) (w r)", + p=patch_size_t, + q=patch_size_hw, + r=patch_size_hw, + ) + + return x + + +def create_video_autoencoder_config( + latent_channels: int = 4, +): + config = { + "_class_name": "VideoAutoencoder", + "dims": ( + 2, + 1, + ), # 2 for Conv2, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d + "in_channels": 3, # Number of input color channels (e.g., RGB) + "out_channels": 3, # Number of output color channels + "latent_channels": latent_channels, # Number of channels in the latent space representation + "block_out_channels": [ + 128, + 256, + 512, + 512, + ], # Number of output channels of each encoder / decoder inner block + 
"patch_size": 1, + } + + return config + + +def create_video_autoencoder_pathify4x4x4_config( + latent_channels: int = 4, +): + config = { + "_class_name": "VideoAutoencoder", + "dims": ( + 2, + 1, + ), # 2 for Conv2, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d + "in_channels": 3, # Number of input color channels (e.g., RGB) + "out_channels": 3, # Number of output color channels + "latent_channels": latent_channels, # Number of channels in the latent space representation + "block_out_channels": [512] + * 4, # Number of output channels of each encoder / decoder inner block + "patch_size": 4, + "latent_log_var": "uniform", + } + + return config + + +def create_video_autoencoder_pathify4x4_config( + latent_channels: int = 4, +): + config = { + "_class_name": "VideoAutoencoder", + "dims": 2, # 2 for Conv2, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d + "in_channels": 3, # Number of input color channels (e.g., RGB) + "out_channels": 3, # Number of output color channels + "latent_channels": latent_channels, # Number of channels in the latent space representation + "block_out_channels": [512] + * 4, # Number of output channels of each encoder / decoder inner block + "patch_size": 4, + "norm_layer": "pixel_norm", + } + + return config + + +def test_vae_patchify_unpatchify(): + import torch + + x = torch.randn(2, 3, 8, 64, 64) + x_patched = patchify(x, patch_size_hw=4, patch_size_t=4) + x_unpatched = unpatchify(x_patched, patch_size_hw=4, patch_size_t=4) + assert torch.allclose(x, x_unpatched) + + +def demo_video_autoencoder_forward_backward(): + # Configuration for the VideoAutoencoder + config = create_video_autoencoder_pathify4x4x4_config() + + # Instantiate the VideoAutoencoder with the specified configuration + video_autoencoder = VideoAutoencoder.from_config(config) + + print(video_autoencoder) + + # Print the total number of parameters in the video autoencoder + total_params = sum(p.numel() for p in video_autoencoder.parameters()) + print(f"Total number of parameters in VideoAutoencoder: {total_params:,}") + + # Create a mock input tensor simulating a batch of videos + # Shape: (batch_size, channels, depth, height, width) + # E.g., 4 videos, each with 3 color channels, 16 frames, and 64x64 pixels per frame + input_videos = torch.randn(2, 3, 8, 64, 64) + + # Forward pass: encode and decode the input videos + latent = video_autoencoder.encode(input_videos).latent_dist.mode() + print(f"input shape={input_videos.shape}") + print(f"latent shape={latent.shape}") + reconstructed_videos = video_autoencoder.decode( + latent, target_shape=input_videos.shape + ).sample + + print(f"reconstructed shape={reconstructed_videos.shape}") + + # Calculate the loss (e.g., mean squared error) + loss = torch.nn.functional.mse_loss(input_videos, reconstructed_videos) + + # Perform backward pass + loss.backward() + + print(f"Demo completed with loss: {loss.item()}") + + +# Ensure to call the demo function to execute the forward and backward pass +if __name__ == "__main__": + demo_video_autoencoder_forward_backward() diff --git a/ltx_video/models/transformers/__init__.py b/ltx_video/models/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/models/transformers/attention.py b/ltx_video/models/transformers/attention.py new file mode 100644 index 0000000000000000000000000000000000000000..bee0839ad78bfc33d2e940818edec2701ece99c7 --- /dev/null +++ b/ltx_video/models/transformers/attention.py @@ -0,0 
+1,1264 @@ +import inspect +from importlib import import_module +from typing import Any, Dict, Optional, Tuple + +import torch +import torch.nn.functional as F +from diffusers.models.activations import GEGLU, GELU, ApproximateGELU +from diffusers.models.attention import _chunked_feed_forward +from diffusers.models.attention_processor import ( + LoRAAttnAddedKVProcessor, + LoRAAttnProcessor, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + SpatialNorm, +) +from diffusers.models.lora import LoRACompatibleLinear +from diffusers.models.normalization import RMSNorm +from diffusers.utils import deprecate, logging +from diffusers.utils.torch_utils import maybe_allow_in_graph +from einops import rearrange +from torch import nn + +from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy + +try: + from torch_xla.experimental.custom_kernel import flash_attention +except ImportError: + # workaround for automatic tests. Currently this function is manually patched + # to the torch_xla lib on setup of container + pass + +# code adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py + +logger = logging.get_logger(__name__) + + +@maybe_allow_in_graph +class BasicTransformerBlock(nn.Module): + r""" + A basic Transformer block. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + num_embeds_ada_norm (: + obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (: + obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + upcast_attention (`bool`, *optional*): + Whether to upcast the attention computation to float32. This is useful for mixed precision training. + norm_elementwise_affine (`bool`, *optional*, defaults to `True`): + Whether to use learnable elementwise affine parameters for normalization. + qk_norm (`str`, *optional*, defaults to None): + Set to 'layer_norm' or `rms_norm` to perform query and key normalization. + adaptive_norm (`str`, *optional*, defaults to `"single_scale_shift"`): + The type of adaptive norm to use. Can be `"single_scale_shift"`, `"single_scale"` or "none". + standardization_norm (`str`, *optional*, defaults to `"layer_norm"`): + The type of pre-normalization to use. Can be `"layer_norm"` or `"rms_norm"`. + final_dropout (`bool` *optional*, defaults to False): + Whether to apply a final dropout after the last feed-forward layer. + attention_type (`str`, *optional*, defaults to `"default"`): + The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`. + positional_embeddings (`str`, *optional*, defaults to `None`): + The type of positional embeddings to apply to. 
+ num_positional_embeddings (`int`, *optional*, defaults to `None`): + The maximum number of positional embeddings to apply. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, # pylint: disable=unused-argument + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + adaptive_norm: str = "single_scale_shift", # 'single_scale_shift', 'single_scale' or 'none' + standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm' + norm_eps: float = 1e-5, + qk_norm: Optional[str] = None, + final_dropout: bool = False, + attention_type: str = "default", # pylint: disable=unused-argument + ff_inner_dim: Optional[int] = None, + ff_bias: bool = True, + attention_out_bias: bool = True, + use_tpu_flash_attention: bool = False, + use_rope: bool = False, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + self.use_tpu_flash_attention = use_tpu_flash_attention + self.adaptive_norm = adaptive_norm + + assert standardization_norm in ["layer_norm", "rms_norm"] + assert adaptive_norm in ["single_scale_shift", "single_scale", "none"] + + make_norm_layer = ( + nn.LayerNorm if standardization_norm == "layer_norm" else RMSNorm + ) + + # Define 3 blocks. Each block has its own normalization layer. + # 1. Self-Attn + self.norm1 = make_norm_layer( + dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps + ) + + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + out_bias=attention_out_bias, + use_tpu_flash_attention=use_tpu_flash_attention, + qk_norm=qk_norm, + use_rope=use_rope, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=( + cross_attention_dim if not double_self_attention else None + ), + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + out_bias=attention_out_bias, + use_tpu_flash_attention=use_tpu_flash_attention, + qk_norm=qk_norm, + use_rope=use_rope, + ) # is self-attn if encoder_hidden_states is none + + if adaptive_norm == "none": + self.attn2_norm = make_norm_layer( + dim, norm_eps, norm_elementwise_affine + ) + else: + self.attn2 = None + self.attn2_norm = None + + self.norm2 = make_norm_layer(dim, norm_eps, norm_elementwise_affine) + + # 3. Feed-forward + self.ff = FeedForward( + dim, + dropout=dropout, + activation_fn=activation_fn, + final_dropout=final_dropout, + inner_dim=ff_inner_dim, + bias=ff_bias, + ) + + # 5. Scale-shift for PixArt-Alpha. + if adaptive_norm != "none": + num_ada_params = 4 if adaptive_norm == "single_scale" else 6 + self.scale_shift_table = nn.Parameter( + torch.randn(num_ada_params, dim) / dim**0.5 + ) + + # let chunk size default to None + self._chunk_size = None + self._chunk_dim = 0 + + def set_use_tpu_flash_attention(self): + r""" + Function sets the flag in this object and propagates down the children. The flag will enforce the usage of TPU + attention kernel. 
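+
+        Note (editorial observation, not an upstream guarantee): the flag is propagated
+        to both `attn1` and `attn2`. If the block was constructed without a
+        cross-attention layer (`cross_attention_dim=None` and
+        `double_self_attention=False`), `attn2` is `None`, so that configuration needs
+        to be guarded before calling this method.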
+ """ + self.use_tpu_flash_attention = True + self.attn1.set_use_tpu_flash_attention() + self.attn2.set_use_tpu_flash_attention() + + def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0): + # Sets chunk feed-forward + self._chunk_size = chunk_size + self._chunk_dim = dim + + def forward( + self, + hidden_states: torch.FloatTensor, + freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + timestep: Optional[torch.LongTensor] = None, + cross_attention_kwargs: Dict[str, Any] = None, + class_labels: Optional[torch.LongTensor] = None, + skip_layer_mask: Optional[torch.Tensor] = None, + skip_layer_strategy: Optional[SkipLayerStrategy] = None, + ) -> torch.FloatTensor: + if cross_attention_kwargs is not None: + if cross_attention_kwargs.get("scale", None) is not None: + logger.warning( + "Passing `scale` to `cross_attention_kwargs` is depcrecated. `scale` will be ignored." + ) + + # Notice that normalization is always applied before the real computation in the following blocks. + # 0. Self-Attention + batch_size = hidden_states.shape[0] + + original_hidden_states = hidden_states + + norm_hidden_states = self.norm1(hidden_states) + + # Apply ada_norm_single + if self.adaptive_norm in ["single_scale_shift", "single_scale"]: + assert timestep.ndim == 3 # [batch, 1 or num_tokens, embedding_dim] + num_ada_params = self.scale_shift_table.shape[0] + ada_values = self.scale_shift_table[None, None] + timestep.reshape( + batch_size, timestep.shape[1], num_ada_params, -1 + ) + if self.adaptive_norm == "single_scale_shift": + shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ( + ada_values.unbind(dim=2) + ) + norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa + else: + scale_msa, gate_msa, scale_mlp, gate_mlp = ada_values.unbind(dim=2) + norm_hidden_states = norm_hidden_states * (1 + scale_msa) + elif self.adaptive_norm == "none": + scale_msa, gate_msa, scale_mlp, gate_mlp = None, None, None, None + else: + raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}") + + norm_hidden_states = norm_hidden_states.squeeze( + 1 + ) # TODO: Check if this is needed + + # 1. Prepare GLIGEN inputs + cross_attention_kwargs = ( + cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {} + ) + + attn_output = self.attn1( + norm_hidden_states, + freqs_cis=freqs_cis, + encoder_hidden_states=( + encoder_hidden_states if self.only_cross_attention else None + ), + attention_mask=attention_mask, + skip_layer_mask=skip_layer_mask, + skip_layer_strategy=skip_layer_strategy, + **cross_attention_kwargs, + ) + if gate_msa is not None: + attn_output = gate_msa * attn_output + + hidden_states = attn_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + # 3. Cross-Attention + if self.attn2 is not None: + if self.adaptive_norm == "none": + attn_input = self.attn2_norm(hidden_states) + else: + attn_input = hidden_states + attn_output = self.attn2( + attn_input, + freqs_cis=freqs_cis, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + hidden_states = attn_output + hidden_states + + # 4. 
Feed-forward + norm_hidden_states = self.norm2(hidden_states) + if self.adaptive_norm == "single_scale_shift": + norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp + elif self.adaptive_norm == "single_scale": + norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + elif self.adaptive_norm == "none": + pass + else: + raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}") + + if self._chunk_size is not None: + # "feed_forward_chunk_size" can be used to save memory + ff_output = _chunked_feed_forward( + self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size + ) + else: + ff_output = self.ff(norm_hidden_states) + if gate_mlp is not None: + ff_output = gate_mlp * ff_output + + hidden_states = ff_output + hidden_states + if hidden_states.ndim == 4: + hidden_states = hidden_states.squeeze(1) + + if ( + skip_layer_mask is not None + and skip_layer_strategy == SkipLayerStrategy.TransformerBlock + ): + skip_layer_mask = skip_layer_mask.view(-1, 1, 1) + hidden_states = hidden_states * skip_layer_mask + original_hidden_states * ( + 1.0 - skip_layer_mask + ) + + return hidden_states + + +@maybe_allow_in_graph +class Attention(nn.Module): + r""" + A cross attention layer. + + Parameters: + query_dim (`int`): + The number of channels in the query. + cross_attention_dim (`int`, *optional*): + The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`. + heads (`int`, *optional*, defaults to 8): + The number of heads to use for multi-head attention. + dim_head (`int`, *optional*, defaults to 64): + The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): + The dropout probability to use. + bias (`bool`, *optional*, defaults to False): + Set to `True` for the query, key, and value linear layers to contain a bias parameter. + upcast_attention (`bool`, *optional*, defaults to False): + Set to `True` to upcast the attention computation to `float32`. + upcast_softmax (`bool`, *optional*, defaults to False): + Set to `True` to upcast the softmax computation to `float32`. + cross_attention_norm (`str`, *optional*, defaults to `None`): + The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`. + cross_attention_norm_num_groups (`int`, *optional*, defaults to 32): + The number of groups to use for the group norm in the cross attention. + added_kv_proj_dim (`int`, *optional*, defaults to `None`): + The number of channels to use for the added key and value projections. If `None`, no projection is used. + norm_num_groups (`int`, *optional*, defaults to `None`): + The number of groups to use for the group norm in the attention. + spatial_norm_dim (`int`, *optional*, defaults to `None`): + The number of channels to use for the spatial normalization. + out_bias (`bool`, *optional*, defaults to `True`): + Set to `True` to use a bias in the output linear layer. + scale_qk (`bool`, *optional*, defaults to `True`): + Set to `True` to scale the query and key by `1 / sqrt(dim_head)`. + qk_norm (`str`, *optional*, defaults to None): + Set to 'layer_norm' or `rms_norm` to perform query and key normalization. + only_cross_attention (`bool`, *optional*, defaults to `False`): + Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if + `added_kv_proj_dim` is not `None`. + eps (`float`, *optional*, defaults to 1e-5): + An additional value added to the denominator in group normalization that is used for numerical stability. 
+ rescale_output_factor (`float`, *optional*, defaults to 1.0): + A factor to rescale the output by dividing it with this value. + residual_connection (`bool`, *optional*, defaults to `False`): + Set to `True` to add the residual connection to the output. + _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`): + Set to `True` if the attention block is loaded from a deprecated state dict. + processor (`AttnProcessor`, *optional*, defaults to `None`): + The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and + `AttnProcessor` otherwise. + """ + + def __init__( + self, + query_dim: int, + cross_attention_dim: Optional[int] = None, + heads: int = 8, + dim_head: int = 64, + dropout: float = 0.0, + bias: bool = False, + upcast_attention: bool = False, + upcast_softmax: bool = False, + cross_attention_norm: Optional[str] = None, + cross_attention_norm_num_groups: int = 32, + added_kv_proj_dim: Optional[int] = None, + norm_num_groups: Optional[int] = None, + spatial_norm_dim: Optional[int] = None, + out_bias: bool = True, + scale_qk: bool = True, + qk_norm: Optional[str] = None, + only_cross_attention: bool = False, + eps: float = 1e-5, + rescale_output_factor: float = 1.0, + residual_connection: bool = False, + _from_deprecated_attn_block: bool = False, + processor: Optional["AttnProcessor"] = None, + out_dim: int = None, + use_tpu_flash_attention: bool = False, + use_rope: bool = False, + ): + super().__init__() + self.inner_dim = out_dim if out_dim is not None else dim_head * heads + self.query_dim = query_dim + self.use_bias = bias + self.is_cross_attention = cross_attention_dim is not None + self.cross_attention_dim = ( + cross_attention_dim if cross_attention_dim is not None else query_dim + ) + self.upcast_attention = upcast_attention + self.upcast_softmax = upcast_softmax + self.rescale_output_factor = rescale_output_factor + self.residual_connection = residual_connection + self.dropout = dropout + self.fused_projections = False + self.out_dim = out_dim if out_dim is not None else query_dim + self.use_tpu_flash_attention = use_tpu_flash_attention + self.use_rope = use_rope + + # we make use of this private variable to know whether this class is loaded + # with an deprecated state dict so that we can convert it on the fly + self._from_deprecated_attn_block = _from_deprecated_attn_block + + self.scale_qk = scale_qk + self.scale = dim_head**-0.5 if self.scale_qk else 1.0 + + if qk_norm is None: + self.q_norm = nn.Identity() + self.k_norm = nn.Identity() + elif qk_norm == "rms_norm": + self.q_norm = RMSNorm(dim_head * heads, eps=1e-5) + self.k_norm = RMSNorm(dim_head * heads, eps=1e-5) + elif qk_norm == "layer_norm": + self.q_norm = nn.LayerNorm(dim_head * heads, eps=1e-5) + self.k_norm = nn.LayerNorm(dim_head * heads, eps=1e-5) + else: + raise ValueError(f"Unsupported qk_norm method: {qk_norm}") + + self.heads = out_dim // dim_head if out_dim is not None else heads + # for slice_size > 0 the attention score computation + # is split across the batch axis to save memory + # You can set slice_size with `set_attention_slice` + self.sliceable_head_dim = heads + + self.added_kv_proj_dim = added_kv_proj_dim + self.only_cross_attention = only_cross_attention + + if self.added_kv_proj_dim is None and self.only_cross_attention: + raise ValueError( + "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`." 
+ ) + + if norm_num_groups is not None: + self.group_norm = nn.GroupNorm( + num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True + ) + else: + self.group_norm = None + + if spatial_norm_dim is not None: + self.spatial_norm = SpatialNorm( + f_channels=query_dim, zq_channels=spatial_norm_dim + ) + else: + self.spatial_norm = None + + if cross_attention_norm is None: + self.norm_cross = None + elif cross_attention_norm == "layer_norm": + self.norm_cross = nn.LayerNorm(self.cross_attention_dim) + elif cross_attention_norm == "group_norm": + if self.added_kv_proj_dim is not None: + # The given `encoder_hidden_states` are initially of shape + # (batch_size, seq_len, added_kv_proj_dim) before being projected + # to (batch_size, seq_len, cross_attention_dim). The norm is applied + # before the projection, so we need to use `added_kv_proj_dim` as + # the number of channels for the group norm. + norm_cross_num_channels = added_kv_proj_dim + else: + norm_cross_num_channels = self.cross_attention_dim + + self.norm_cross = nn.GroupNorm( + num_channels=norm_cross_num_channels, + num_groups=cross_attention_norm_num_groups, + eps=1e-5, + affine=True, + ) + else: + raise ValueError( + f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'" + ) + + linear_cls = nn.Linear + + self.linear_cls = linear_cls + self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias) + + if not self.only_cross_attention: + # only relevant for the `AddedKVProcessor` classes + self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) + self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias) + else: + self.to_k = None + self.to_v = None + + if self.added_kv_proj_dim is not None: + self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim) + self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim) + + self.to_out = nn.ModuleList([]) + self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias)) + self.to_out.append(nn.Dropout(dropout)) + + # set attention processor + # We use the AttnProcessor2_0 by default when torch 2.x is used which uses + # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention + # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1 + if processor is None: + processor = AttnProcessor2_0() + self.set_processor(processor) + + def set_use_tpu_flash_attention(self): + r""" + Function sets the flag in this object. The flag will enforce the usage of TPU attention kernel. + """ + self.use_tpu_flash_attention = True + + def set_processor(self, processor: "AttnProcessor") -> None: + r""" + Set the attention processor to use. + + Args: + processor (`AttnProcessor`): + The attention processor to use. + """ + # if current processor is in `self._modules` and if passed `processor` is not, we need to + # pop `processor` from `self._modules` + if ( + hasattr(self, "processor") + and isinstance(self.processor, torch.nn.Module) + and not isinstance(processor, torch.nn.Module) + ): + logger.info( + f"You are removing possibly trained weights of {self.processor} with {processor}" + ) + self._modules.pop("processor") + + self.processor = processor + + def get_processor( + self, return_deprecated_lora: bool = False + ) -> "AttentionProcessor": # noqa: F821 + r""" + Get the attention processor in use. 
+ + Args: + return_deprecated_lora (`bool`, *optional*, defaults to `False`): + Set to `True` to return the deprecated LoRA attention processor. + + Returns: + "AttentionProcessor": The attention processor in use. + """ + if not return_deprecated_lora: + return self.processor + + # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible + # serialization format for LoRA Attention Processors. It should be deleted once the integration + # with PEFT is completed. + is_lora_activated = { + name: module.lora_layer is not None + for name, module in self.named_modules() + if hasattr(module, "lora_layer") + } + + # 1. if no layer has a LoRA activated we can return the processor as usual + if not any(is_lora_activated.values()): + return self.processor + + # If doesn't apply LoRA do `add_k_proj` or `add_v_proj` + is_lora_activated.pop("add_k_proj", None) + is_lora_activated.pop("add_v_proj", None) + # 2. else it is not posssible that only some layers have LoRA activated + if not all(is_lora_activated.values()): + raise ValueError( + f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}" + ) + + # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor + non_lora_processor_cls_name = self.processor.__class__.__name__ + lora_processor_cls = getattr( + import_module(__name__), "LoRA" + non_lora_processor_cls_name + ) + + hidden_size = self.inner_dim + + # now create a LoRA attention processor from the LoRA layers + if lora_processor_cls in [ + LoRAAttnProcessor, + LoRAAttnProcessor2_0, + LoRAXFormersAttnProcessor, + ]: + kwargs = { + "cross_attention_dim": self.cross_attention_dim, + "rank": self.to_q.lora_layer.rank, + "network_alpha": self.to_q.lora_layer.network_alpha, + "q_rank": self.to_q.lora_layer.rank, + "q_hidden_size": self.to_q.lora_layer.out_features, + "k_rank": self.to_k.lora_layer.rank, + "k_hidden_size": self.to_k.lora_layer.out_features, + "v_rank": self.to_v.lora_layer.rank, + "v_hidden_size": self.to_v.lora_layer.out_features, + "out_rank": self.to_out[0].lora_layer.rank, + "out_hidden_size": self.to_out[0].lora_layer.out_features, + } + + if hasattr(self.processor, "attention_op"): + kwargs["attention_op"] = self.processor.attention_op + + lora_processor = lora_processor_cls(hidden_size, **kwargs) + lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) + lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) + lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) + lora_processor.to_out_lora.load_state_dict( + self.to_out[0].lora_layer.state_dict() + ) + elif lora_processor_cls == LoRAAttnAddedKVProcessor: + lora_processor = lora_processor_cls( + hidden_size, + cross_attention_dim=self.add_k_proj.weight.shape[0], + rank=self.to_q.lora_layer.rank, + network_alpha=self.to_q.lora_layer.network_alpha, + ) + lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict()) + lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict()) + lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict()) + lora_processor.to_out_lora.load_state_dict( + self.to_out[0].lora_layer.state_dict() + ) + + # only save if used + if self.add_k_proj.lora_layer is not None: + lora_processor.add_k_proj_lora.load_state_dict( + self.add_k_proj.lora_layer.state_dict() + ) + lora_processor.add_v_proj_lora.load_state_dict( + self.add_v_proj.lora_layer.state_dict() + ) + else: + 
lora_processor.add_k_proj_lora = None + lora_processor.add_v_proj_lora = None + else: + raise ValueError(f"{lora_processor_cls} does not exist.") + + return lora_processor + + def forward( + self, + hidden_states: torch.FloatTensor, + freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + skip_layer_mask: Optional[torch.Tensor] = None, + skip_layer_strategy: Optional[SkipLayerStrategy] = None, + **cross_attention_kwargs, + ) -> torch.Tensor: + r""" + The forward method of the `Attention` class. + + Args: + hidden_states (`torch.Tensor`): + The hidden states of the query. + encoder_hidden_states (`torch.Tensor`, *optional*): + The hidden states of the encoder. + attention_mask (`torch.Tensor`, *optional*): + The attention mask to use. If `None`, no mask is applied. + skip_layer_mask (`torch.Tensor`, *optional*): + The skip layer mask to use. If `None`, no mask is applied. + skip_layer_strategy (`SkipLayerStrategy`, *optional*, defaults to `None`): + Controls which layers to skip for spatiotemporal guidance. + **cross_attention_kwargs: + Additional keyword arguments to pass along to the cross attention. + + Returns: + `torch.Tensor`: The output of the attention layer. + """ + # The `Attention` class can call different attention processors / attention functions + # here we simply pass along all tensors to the selected processor class + # For standard processors that are defined here, `**cross_attention_kwargs` is empty + + attn_parameters = set( + inspect.signature(self.processor.__call__).parameters.keys() + ) + unused_kwargs = [ + k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters + ] + if len(unused_kwargs) > 0: + logger.warning( + f"cross_attention_kwargs {unused_kwargs} are not expected by" + f" {self.processor.__class__.__name__} and will be ignored." + ) + cross_attention_kwargs = { + k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters + } + + return self.processor( + self, + hidden_states, + freqs_cis=freqs_cis, + encoder_hidden_states=encoder_hidden_states, + attention_mask=attention_mask, + skip_layer_mask=skip_layer_mask, + skip_layer_strategy=skip_layer_strategy, + **cross_attention_kwargs, + ) + + def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor: + r""" + Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads` + is the number of heads initialized while constructing the `Attention` class. + + Args: + tensor (`torch.Tensor`): The tensor to reshape. + + Returns: + `torch.Tensor`: The reshaped tensor. + """ + head_size = self.heads + batch_size, seq_len, dim = tensor.shape + tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim) + tensor = tensor.permute(0, 2, 1, 3).reshape( + batch_size // head_size, seq_len, dim * head_size + ) + return tensor + + def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor: + r""" + Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is + the number of heads initialized while constructing the `Attention` class. + + Args: + tensor (`torch.Tensor`): The tensor to reshape. + out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is + reshaped to `[batch_size * heads, seq_len, dim // heads]`. + + Returns: + `torch.Tensor`: The reshaped tensor. 
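+
+        Example (illustrative shapes; assumes an `Attention` instance built with
+        `heads=8`, e.g. `attn = Attention(query_dim=640, heads=8, dim_head=80)`):
+
+            t = attn.head_to_batch_dim(torch.randn(2, 77, 640))   # -> (16, 77, 80)
+            back = attn.batch_to_head_dim(t)                      # -> (2, 77, 640)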
+ """ + + head_size = self.heads + if tensor.ndim == 3: + batch_size, seq_len, dim = tensor.shape + extra_dim = 1 + else: + batch_size, extra_dim, seq_len, dim = tensor.shape + tensor = tensor.reshape( + batch_size, seq_len * extra_dim, head_size, dim // head_size + ) + tensor = tensor.permute(0, 2, 1, 3) + + if out_dim == 3: + tensor = tensor.reshape( + batch_size * head_size, seq_len * extra_dim, dim // head_size + ) + + return tensor + + def get_attention_scores( + self, + query: torch.Tensor, + key: torch.Tensor, + attention_mask: torch.Tensor = None, + ) -> torch.Tensor: + r""" + Compute the attention scores. + + Args: + query (`torch.Tensor`): The query tensor. + key (`torch.Tensor`): The key tensor. + attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied. + + Returns: + `torch.Tensor`: The attention probabilities/scores. + """ + dtype = query.dtype + if self.upcast_attention: + query = query.float() + key = key.float() + + if attention_mask is None: + baddbmm_input = torch.empty( + query.shape[0], + query.shape[1], + key.shape[1], + dtype=query.dtype, + device=query.device, + ) + beta = 0 + else: + baddbmm_input = attention_mask + beta = 1 + + attention_scores = torch.baddbmm( + baddbmm_input, + query, + key.transpose(-1, -2), + beta=beta, + alpha=self.scale, + ) + del baddbmm_input + + if self.upcast_softmax: + attention_scores = attention_scores.float() + + attention_probs = attention_scores.softmax(dim=-1) + del attention_scores + + attention_probs = attention_probs.to(dtype) + + return attention_probs + + def prepare_attention_mask( + self, + attention_mask: torch.Tensor, + target_length: int, + batch_size: int, + out_dim: int = 3, + ) -> torch.Tensor: + r""" + Prepare the attention mask for the attention computation. + + Args: + attention_mask (`torch.Tensor`): + The attention mask to prepare. + target_length (`int`): + The target length of the attention mask. This is the length of the attention mask after padding. + batch_size (`int`): + The batch size, which is used to repeat the attention mask. + out_dim (`int`, *optional*, defaults to `3`): + The output dimension of the attention mask. Can be either `3` or `4`. + + Returns: + `torch.Tensor`: The prepared attention mask. + """ + head_size = self.heads + if attention_mask is None: + return attention_mask + + current_length: int = attention_mask.shape[-1] + if current_length != target_length: + if attention_mask.device.type == "mps": + # HACK: MPS: Does not support padding by greater than dimension of input tensor. + # Instead, we can manually construct the padding tensor. 
+ padding_shape = ( + attention_mask.shape[0], + attention_mask.shape[1], + target_length, + ) + padding = torch.zeros( + padding_shape, + dtype=attention_mask.dtype, + device=attention_mask.device, + ) + attention_mask = torch.cat([attention_mask, padding], dim=2) + else: + # TODO: for pipelines such as stable-diffusion, padding cross-attn mask: + # we want to instead pad by (0, remaining_length), where remaining_length is: + # remaining_length: int = target_length - current_length + # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding + attention_mask = F.pad(attention_mask, (0, target_length), value=0.0) + + if out_dim == 3: + if attention_mask.shape[0] < batch_size * head_size: + attention_mask = attention_mask.repeat_interleave(head_size, dim=0) + elif out_dim == 4: + attention_mask = attention_mask.unsqueeze(1) + attention_mask = attention_mask.repeat_interleave(head_size, dim=1) + + return attention_mask + + def norm_encoder_hidden_states( + self, encoder_hidden_states: torch.Tensor + ) -> torch.Tensor: + r""" + Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the + `Attention` class. + + Args: + encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder. + + Returns: + `torch.Tensor`: The normalized encoder hidden states. + """ + assert ( + self.norm_cross is not None + ), "self.norm_cross must be defined to call self.norm_encoder_hidden_states" + + if isinstance(self.norm_cross, nn.LayerNorm): + encoder_hidden_states = self.norm_cross(encoder_hidden_states) + elif isinstance(self.norm_cross, nn.GroupNorm): + # Group norm norms along the channels dimension and expects + # input to be in the shape of (N, C, *). In this case, we want + # to norm along the hidden dimension, so we need to move + # (batch_size, sequence_length, hidden_size) -> + # (batch_size, hidden_size, sequence_length) + encoder_hidden_states = encoder_hidden_states.transpose(1, 2) + encoder_hidden_states = self.norm_cross(encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states.transpose(1, 2) + else: + assert False + + return encoder_hidden_states + + @staticmethod + def apply_rotary_emb( + input_tensor: torch.Tensor, + freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor], + ) -> Tuple[torch.Tensor, torch.Tensor]: + cos_freqs = freqs_cis[0] + sin_freqs = freqs_cis[1] + + t_dup = rearrange(input_tensor, "... (d r) -> ... d r", r=2) + t1, t2 = t_dup.unbind(dim=-1) + t_dup = torch.stack((-t2, t1), dim=-1) + input_tensor_rot = rearrange(t_dup, "... d r -> ... (d r)") + + out = input_tensor * cos_freqs + input_tensor_rot * sin_freqs + + return out + + +class AttnProcessor2_0: + r""" + Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0). + """ + + def __init__(self): + pass + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor], + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + skip_layer_mask: Optional[torch.FloatTensor] = None, + skip_layer_strategy: Optional[SkipLayerStrategy] = None, + *args, + **kwargs, + ) -> torch.FloatTensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. 
`scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." + deprecate("scale", "1.0.0", deprecation_message) + + residual = hidden_states + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + + if skip_layer_mask is not None: + skip_layer_mask = skip_layer_mask.reshape(batch_size, 1, 1) + + if (attention_mask is not None) and (not attn.use_tpu_flash_attention): + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + # scaled_dot_product_attention expects attention_mask shape to be + # (batch, heads, source_length, target_length) + attention_mask = attention_mask.view( + batch_size, attn.heads, -1, attention_mask.shape[-1] + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + query = attn.q_norm(query) + + if encoder_hidden_states is not None: + if attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states( + encoder_hidden_states + ) + key = attn.to_k(encoder_hidden_states) + key = attn.k_norm(key) + else: # if no context provided do self-attention + encoder_hidden_states = hidden_states + key = attn.to_k(hidden_states) + key = attn.k_norm(key) + if attn.use_rope: + key = attn.apply_rotary_emb(key, freqs_cis) + query = attn.apply_rotary_emb(query, freqs_cis) + + value = attn.to_v(encoder_hidden_states) + value_for_stg = value + + inner_dim = key.shape[-1] + head_dim = inner_dim // attn.heads + + query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) + + # the output of sdp = (batch, num_heads, seq_len, head_dim) + + if attn.use_tpu_flash_attention: # use tpu attention offload 'flash attention' + q_segment_indexes = None + if ( + attention_mask is not None + ): # if mask is required need to tune both segmenIds fields + # attention_mask = torch.squeeze(attention_mask).to(torch.float32) + attention_mask = attention_mask.to(torch.float32) + q_segment_indexes = torch.ones( + batch_size, query.shape[2], device=query.device, dtype=torch.float32 + ) + assert ( + attention_mask.shape[1] == key.shape[2] + ), f"ERROR: KEY SHAPE must be same as attention mask [{key.shape[2]}, {attention_mask.shape[1]}]" + + assert ( + query.shape[2] % 128 == 0 + ), f"ERROR: QUERY SHAPE must be divisible by 128 (TPU limitation) [{query.shape[2]}]" + assert ( + key.shape[2] % 128 == 0 + ), f"ERROR: KEY SHAPE must be divisible by 128 (TPU limitation) [{key.shape[2]}]" + + # run the TPU kernel implemented in jax with pallas + hidden_states_a = flash_attention( + q=query, + k=key, + v=value, + q_segment_ids=q_segment_indexes, + kv_segment_ids=attention_mask, + sm_scale=attn.scale, + ) + else: + hidden_states_a = F.scaled_dot_product_attention( + query, + key, + value, + attn_mask=attention_mask, + dropout_p=0.0, + is_causal=False, + ) + + hidden_states_a = hidden_states_a.transpose(1, 2).reshape( + batch_size, -1, attn.heads * head_dim + ) + 
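+        # Editorial note (added comment, not upstream): in the blending below, entries
+        # whose skip-layer mask is 1 keep the attention output `hidden_states_a`, while
+        # entries with mask 0 fall back to the pre-attention `hidden_states`
+        # (AttentionSkip) or to the raw value projections `value_for_stg`
+        # (AttentionValues), which is what implements the spatiotemporal guidance skip.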
hidden_states_a = hidden_states_a.to(query.dtype) + + if ( + skip_layer_mask is not None + and skip_layer_strategy == SkipLayerStrategy.AttentionSkip + ): + hidden_states = hidden_states_a * skip_layer_mask + hidden_states * ( + 1.0 - skip_layer_mask + ) + elif ( + skip_layer_mask is not None + and skip_layer_strategy == SkipLayerStrategy.AttentionValues + ): + hidden_states = hidden_states_a * skip_layer_mask + value_for_stg * ( + 1.0 - skip_layer_mask + ) + else: + hidden_states = hidden_states_a + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + if ( + skip_layer_mask is not None + and skip_layer_strategy == SkipLayerStrategy.Residual + ): + skip_layer_mask = skip_layer_mask.reshape(batch_size, 1, 1, 1) + + if attn.residual_connection: + if ( + skip_layer_mask is not None + and skip_layer_strategy == SkipLayerStrategy.Residual + ): + hidden_states = hidden_states + residual * skip_layer_mask + else: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class AttnProcessor: + r""" + Default processor for performing attention-related computations. + """ + + def __call__( + self, + attn: Attention, + hidden_states: torch.FloatTensor, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + attention_mask: Optional[torch.FloatTensor] = None, + temb: Optional[torch.FloatTensor] = None, + *args, + **kwargs, + ) -> torch.Tensor: + if len(args) > 0 or kwargs.get("scale", None) is not None: + deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`." 
+ deprecate("scale", "1.0.0", deprecation_message) + + residual = hidden_states + + if attn.spatial_norm is not None: + hidden_states = attn.spatial_norm(hidden_states, temb) + + input_ndim = hidden_states.ndim + + if input_ndim == 4: + batch_size, channel, height, width = hidden_states.shape + hidden_states = hidden_states.view( + batch_size, channel, height * width + ).transpose(1, 2) + + batch_size, sequence_length, _ = ( + hidden_states.shape + if encoder_hidden_states is None + else encoder_hidden_states.shape + ) + attention_mask = attn.prepare_attention_mask( + attention_mask, sequence_length, batch_size + ) + + if attn.group_norm is not None: + hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose( + 1, 2 + ) + + query = attn.to_q(hidden_states) + + if encoder_hidden_states is None: + encoder_hidden_states = hidden_states + elif attn.norm_cross: + encoder_hidden_states = attn.norm_encoder_hidden_states( + encoder_hidden_states + ) + + key = attn.to_k(encoder_hidden_states) + value = attn.to_v(encoder_hidden_states) + + query = attn.head_to_batch_dim(query) + key = attn.head_to_batch_dim(key) + value = attn.head_to_batch_dim(value) + + query = attn.q_norm(query) + key = attn.k_norm(key) + + attention_probs = attn.get_attention_scores(query, key, attention_mask) + hidden_states = torch.bmm(attention_probs, value) + hidden_states = attn.batch_to_head_dim(hidden_states) + + # linear proj + hidden_states = attn.to_out[0](hidden_states) + # dropout + hidden_states = attn.to_out[1](hidden_states) + + if input_ndim == 4: + hidden_states = hidden_states.transpose(-1, -2).reshape( + batch_size, channel, height, width + ) + + if attn.residual_connection: + hidden_states = hidden_states + residual + + hidden_states = hidden_states / attn.rescale_output_factor + + return hidden_states + + +class FeedForward(nn.Module): + r""" + A feed-forward layer. + + Parameters: + dim (`int`): The number of channels in the input. + dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. + mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. + bias (`bool`, defaults to True): Whether to use a bias in the linear layer. 
+ """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + mult: int = 4, + dropout: float = 0.0, + activation_fn: str = "geglu", + final_dropout: bool = False, + inner_dim=None, + bias: bool = True, + ): + super().__init__() + if inner_dim is None: + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + linear_cls = nn.Linear + + if activation_fn == "gelu": + act_fn = GELU(dim, inner_dim, bias=bias) + elif activation_fn == "gelu-approximate": + act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias) + elif activation_fn == "geglu": + act_fn = GEGLU(dim, inner_dim, bias=bias) + elif activation_fn == "geglu-approximate": + act_fn = ApproximateGELU(dim, inner_dim, bias=bias) + else: + raise ValueError(f"Unsupported activation function: {activation_fn}") + + self.net = nn.ModuleList([]) + # project in + self.net.append(act_fn) + # project dropout + self.net.append(nn.Dropout(dropout)) + # project out + self.net.append(linear_cls(inner_dim, dim_out, bias=bias)) + # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout + if final_dropout: + self.net.append(nn.Dropout(dropout)) + + def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor: + compatible_cls = (GEGLU, LoRACompatibleLinear) + for module in self.net: + if isinstance(module, compatible_cls): + hidden_states = module(hidden_states, scale) + else: + hidden_states = module(hidden_states) + return hidden_states diff --git a/ltx_video/models/transformers/embeddings.py b/ltx_video/models/transformers/embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..a30d6be16b4f3fe709cf24465e06eb798889ba66 --- /dev/null +++ b/ltx_video/models/transformers/embeddings.py @@ -0,0 +1,129 @@ +# Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/embeddings.py +import math + +import numpy as np +import torch +from einops import rearrange +from torch import nn + + +def get_timestep_embedding( + timesteps: torch.Tensor, + embedding_dim: int, + flip_sin_to_cos: bool = False, + downscale_freq_shift: float = 1, + scale: float = 1, + max_period: int = 10000, +): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: Create sinusoidal timestep embeddings. + + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param embedding_dim: the dimension of the output. :param max_period: controls the minimum frequency of the + embeddings. :return: an [N x dim] Tensor of positional embeddings. 
+ """ + assert len(timesteps.shape) == 1, "Timesteps should be a 1d-array" + + half_dim = embedding_dim // 2 + exponent = -math.log(max_period) * torch.arange( + start=0, end=half_dim, dtype=torch.float32, device=timesteps.device + ) + exponent = exponent / (half_dim - downscale_freq_shift) + + emb = torch.exp(exponent) + emb = timesteps[:, None].float() * emb[None, :] + + # scale embeddings + emb = scale * emb + + # concat sine and cosine embeddings + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=-1) + + # flip sine and cosine embeddings + if flip_sin_to_cos: + emb = torch.cat([emb[:, half_dim:], emb[:, :half_dim]], dim=-1) + + # zero pad + if embedding_dim % 2 == 1: + emb = torch.nn.functional.pad(emb, (0, 1, 0, 0)) + return emb + + +def get_3d_sincos_pos_embed(embed_dim, grid, w, h, f): + """ + grid_size: int of the grid height and width return: pos_embed: [grid_size*grid_size, embed_dim] or + [1+grid_size*grid_size, embed_dim] (w/ or w/o cls_token) + """ + grid = rearrange(grid, "c (f h w) -> c f h w", h=h, w=w) + grid = rearrange(grid, "c f h w -> c h w f", h=h, w=w) + grid = grid.reshape([3, 1, w, h, f]) + pos_embed = get_3d_sincos_pos_embed_from_grid(embed_dim, grid) + pos_embed = pos_embed.transpose(1, 0, 2, 3) + return rearrange(pos_embed, "h w f c -> (f h w) c") + + +def get_3d_sincos_pos_embed_from_grid(embed_dim, grid): + if embed_dim % 3 != 0: + raise ValueError("embed_dim must be divisible by 3") + + # use half of dimensions to encode grid_h + emb_f = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[0]) # (H*W*T, D/3) + emb_h = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[1]) # (H*W*T, D/3) + emb_w = get_1d_sincos_pos_embed_from_grid(embed_dim // 3, grid[2]) # (H*W*T, D/3) + + emb = np.concatenate([emb_h, emb_w, emb_f], axis=-1) # (H*W*T, D) + return emb + + +def get_1d_sincos_pos_embed_from_grid(embed_dim, pos): + """ + embed_dim: output dimension for each position pos: a list of positions to be encoded: size (M,) out: (M, D) + """ + if embed_dim % 2 != 0: + raise ValueError("embed_dim must be divisible by 2") + + omega = np.arange(embed_dim // 2, dtype=np.float64) + omega /= embed_dim / 2.0 + omega = 1.0 / 10000**omega # (D/2,) + + pos_shape = pos.shape + + pos = pos.reshape(-1) + out = np.einsum("m,d->md", pos, omega) # (M, D/2), outer product + out = out.reshape([*pos_shape, -1])[0] + + emb_sin = np.sin(out) # (M, D/2) + emb_cos = np.cos(out) # (M, D/2) + + emb = np.concatenate([emb_sin, emb_cos], axis=-1) # (M, D) + return emb + + +class SinusoidalPositionalEmbedding(nn.Module): + """Apply positional information to a sequence of embeddings. + + Takes in a sequence of embeddings with shape (batch_size, seq_length, embed_dim) and adds positional embeddings to + them + + Args: + embed_dim: (int): Dimension of the positional embedding. 
+ max_seq_length: Maximum sequence length to apply positional embeddings + + """ + + def __init__(self, embed_dim: int, max_seq_length: int = 32): + super().__init__() + position = torch.arange(max_seq_length).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, embed_dim, 2) * (-math.log(10000.0) / embed_dim) + ) + pe = torch.zeros(1, max_seq_length, embed_dim) + pe[0, :, 0::2] = torch.sin(position * div_term) + pe[0, :, 1::2] = torch.cos(position * div_term) + self.register_buffer("pe", pe) + + def forward(self, x): + _, seq_length, _ = x.shape + x = x + self.pe[:, :seq_length] + return x diff --git a/ltx_video/models/transformers/symmetric_patchifier.py b/ltx_video/models/transformers/symmetric_patchifier.py new file mode 100644 index 0000000000000000000000000000000000000000..2eca32033eef03c0dbffd7a25cca993bbda57ded --- /dev/null +++ b/ltx_video/models/transformers/symmetric_patchifier.py @@ -0,0 +1,84 @@ +from abc import ABC, abstractmethod +from typing import Tuple + +import torch +from diffusers.configuration_utils import ConfigMixin +from einops import rearrange +from torch import Tensor + + +class Patchifier(ConfigMixin, ABC): + def __init__(self, patch_size: int): + super().__init__() + self._patch_size = (1, patch_size, patch_size) + + @abstractmethod + def patchify(self, latents: Tensor) -> Tuple[Tensor, Tensor]: + raise NotImplementedError("Patchify method not implemented") + + @abstractmethod + def unpatchify( + self, + latents: Tensor, + output_height: int, + output_width: int, + out_channels: int, + ) -> Tuple[Tensor, Tensor]: + pass + + @property + def patch_size(self): + return self._patch_size + + def get_latent_coords( + self, latent_num_frames, latent_height, latent_width, batch_size, device + ): + """ + Return a tensor of shape [batch_size, 3, num_patches] containing the + top-left corner latent coordinates of each latent patch. + The tensor is repeated for each batch element. 
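+
+        Example (illustrative sketch, assuming `SymmetricPatchifier(patch_size=1)` so
+        that every latent position is a patch corner):
+
+            patchifier = SymmetricPatchifier(patch_size=1)
+            coords = patchifier.get_latent_coords(8, 16, 16, batch_size=2, device="cpu")
+            # coords.shape == (2, 3, 8 * 16 * 16); channel order is (frame, row, column)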
+ """ + latent_sample_coords = torch.meshgrid( + torch.arange(0, latent_num_frames, self._patch_size[0], device=device), + torch.arange(0, latent_height, self._patch_size[1], device=device), + torch.arange(0, latent_width, self._patch_size[2], device=device), + ) + latent_sample_coords = torch.stack(latent_sample_coords, dim=0) + latent_coords = latent_sample_coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1) + latent_coords = rearrange( + latent_coords, "b c f h w -> b c (f h w)", b=batch_size + ) + return latent_coords + + +class SymmetricPatchifier(Patchifier): + def patchify(self, latents: Tensor) -> Tuple[Tensor, Tensor]: + b, _, f, h, w = latents.shape + latent_coords = self.get_latent_coords(f, h, w, b, latents.device) + latents = rearrange( + latents, + "b c (f p1) (h p2) (w p3) -> b (f h w) (c p1 p2 p3)", + p1=self._patch_size[0], + p2=self._patch_size[1], + p3=self._patch_size[2], + ) + return latents, latent_coords + + def unpatchify( + self, + latents: Tensor, + output_height: int, + output_width: int, + out_channels: int, + ) -> Tuple[Tensor, Tensor]: + output_height = output_height // self._patch_size[1] + output_width = output_width // self._patch_size[2] + latents = rearrange( + latents, + "b (f h w) (c p q) -> b c f (h p) (w q)", + h=output_height, + w=output_width, + p=self._patch_size[1], + q=self._patch_size[2], + ) + return latents diff --git a/ltx_video/models/transformers/transformer3d.py b/ltx_video/models/transformers/transformer3d.py new file mode 100644 index 0000000000000000000000000000000000000000..3dc08d8e3f1669287bca04135fd63498385d014d --- /dev/null +++ b/ltx_video/models/transformers/transformer3d.py @@ -0,0 +1,507 @@ +# Adapted from: https://github.com/huggingface/diffusers/blob/v0.26.3/src/diffusers/models/transformers/transformer_2d.py +import math +from dataclasses import dataclass +from typing import Any, Dict, List, Optional, Union +import os +import json +import glob +from pathlib import Path + +import torch +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.models.embeddings import PixArtAlphaTextProjection +from diffusers.models.modeling_utils import ModelMixin +from diffusers.models.normalization import AdaLayerNormSingle +from diffusers.utils import BaseOutput, is_torch_version +from diffusers.utils import logging +from torch import nn +from safetensors import safe_open + + +from ltx_video.models.transformers.attention import BasicTransformerBlock +from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy + +from ltx_video.utils.diffusers_config_mapping import ( + diffusers_and_ours_config_mapping, + make_hashable_key, + TRANSFORMER_KEYS_RENAME_DICT, +) + + +logger = logging.get_logger(__name__) + + +@dataclass +class Transformer3DModelOutput(BaseOutput): + """ + The output of [`Transformer2DModel`]. + + Args: + sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete): + The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability + distributions for the unnoised latent pixels. 
+ """ + + sample: torch.FloatTensor + + +class Transformer3DModel(ModelMixin, ConfigMixin): + _supports_gradient_checkpointing = True + + @register_to_config + def __init__( + self, + num_attention_heads: int = 16, + attention_head_dim: int = 88, + in_channels: Optional[int] = None, + out_channels: Optional[int] = None, + num_layers: int = 1, + dropout: float = 0.0, + norm_num_groups: int = 32, + cross_attention_dim: Optional[int] = None, + attention_bias: bool = False, + num_vector_embeds: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + use_linear_projection: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + adaptive_norm: str = "single_scale_shift", # 'single_scale_shift' or 'single_scale' + standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm' + norm_elementwise_affine: bool = True, + norm_eps: float = 1e-5, + attention_type: str = "default", + caption_channels: int = None, + use_tpu_flash_attention: bool = False, # if True uses the TPU attention offload ('flash attention') + qk_norm: Optional[str] = None, + positional_embedding_type: str = "rope", + positional_embedding_theta: Optional[float] = None, + positional_embedding_max_pos: Optional[List[int]] = None, + timestep_scale_multiplier: Optional[float] = None, + causal_temporal_positioning: bool = False, # For backward compatibility, will be deprecated + ): + super().__init__() + self.use_tpu_flash_attention = ( + use_tpu_flash_attention # FIXME: push config down to the attention modules + ) + self.use_linear_projection = use_linear_projection + self.num_attention_heads = num_attention_heads + self.attention_head_dim = attention_head_dim + inner_dim = num_attention_heads * attention_head_dim + self.inner_dim = inner_dim + self.patchify_proj = nn.Linear(in_channels, inner_dim, bias=True) + self.positional_embedding_type = positional_embedding_type + self.positional_embedding_theta = positional_embedding_theta + self.positional_embedding_max_pos = positional_embedding_max_pos + self.use_rope = self.positional_embedding_type == "rope" + self.timestep_scale_multiplier = timestep_scale_multiplier + + if self.positional_embedding_type == "absolute": + raise ValueError("Absolute positional embedding is no longer supported") + elif self.positional_embedding_type == "rope": + if positional_embedding_theta is None: + raise ValueError( + "If `positional_embedding_type` type is rope, `positional_embedding_theta` must also be defined" + ) + if positional_embedding_max_pos is None: + raise ValueError( + "If `positional_embedding_type` type is rope, `positional_embedding_max_pos` must also be defined" + ) + + # 3. Define transformers blocks + self.transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + inner_dim, + num_attention_heads, + attention_head_dim, + dropout=dropout, + cross_attention_dim=cross_attention_dim, + activation_fn=activation_fn, + num_embeds_ada_norm=num_embeds_ada_norm, + attention_bias=attention_bias, + only_cross_attention=only_cross_attention, + double_self_attention=double_self_attention, + upcast_attention=upcast_attention, + adaptive_norm=adaptive_norm, + standardization_norm=standardization_norm, + norm_elementwise_affine=norm_elementwise_affine, + norm_eps=norm_eps, + attention_type=attention_type, + use_tpu_flash_attention=use_tpu_flash_attention, + qk_norm=qk_norm, + use_rope=self.use_rope, + ) + for d in range(num_layers) + ] + ) + + # 4. 
Define output layers + self.out_channels = in_channels if out_channels is None else out_channels + self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) + self.scale_shift_table = nn.Parameter( + torch.randn(2, inner_dim) / inner_dim**0.5 + ) + self.proj_out = nn.Linear(inner_dim, self.out_channels) + + self.adaln_single = AdaLayerNormSingle( + inner_dim, use_additional_conditions=False + ) + if adaptive_norm == "single_scale": + self.adaln_single.linear = nn.Linear(inner_dim, 4 * inner_dim, bias=True) + + self.caption_projection = None + if caption_channels is not None: + self.caption_projection = PixArtAlphaTextProjection( + in_features=caption_channels, hidden_size=inner_dim + ) + + self.gradient_checkpointing = False + + def set_use_tpu_flash_attention(self): + r""" + Function sets the flag in this object and propagates down the children. The flag will enforce the usage of TPU + attention kernel. + """ + logger.info("ENABLE TPU FLASH ATTENTION -> TRUE") + self.use_tpu_flash_attention = True + # push config down to the attention modules + for block in self.transformer_blocks: + block.set_use_tpu_flash_attention() + + def create_skip_layer_mask( + self, + batch_size: int, + num_conds: int, + ptb_index: int, + skip_block_list: Optional[List[int]] = None, + ): + if skip_block_list is None or len(skip_block_list) == 0: + return None + num_layers = len(self.transformer_blocks) + mask = torch.ones( + (num_layers, batch_size * num_conds), device=self.device, dtype=self.dtype + ) + for block_idx in skip_block_list: + mask[block_idx, ptb_index::num_conds] = 0 + return mask + + def _set_gradient_checkpointing(self, module, value=False): + if hasattr(module, "gradient_checkpointing"): + module.gradient_checkpointing = value + + def get_fractional_positions(self, indices_grid): + fractional_positions = torch.stack( + [ + indices_grid[:, i] / self.positional_embedding_max_pos[i] + for i in range(3) + ], + dim=-1, + ) + return fractional_positions + + def precompute_freqs_cis(self, indices_grid, spacing="exp"): + dtype = torch.float32 # We need full precision in the freqs_cis computation. 
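+        # Shape sketch (added note, not from the upstream source): `indices_grid` has
+        # shape (b, 3, n_tokens) with per-token (frame, height, width) coordinates, so
+        # `get_fractional_positions` returns (b, n_tokens, 3). Each of the three axes
+        # is assigned dim // 6 frequencies; after flattening, `freqs` has
+        # 3 * (dim // 6) channels per token, and repeat_interleave(2) produces cos/sin
+        # tensors of shape (b, n_tokens, dim), front-padded with cos=1 / sin=0 when
+        # dim % 6 != 0.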
+ dim = self.inner_dim + theta = self.positional_embedding_theta + + fractional_positions = self.get_fractional_positions(indices_grid) + + start = 1 + end = theta + device = fractional_positions.device + if spacing == "exp": + indices = theta ** ( + torch.linspace( + math.log(start, theta), + math.log(end, theta), + dim // 6, + device=device, + dtype=dtype, + ) + ) + indices = indices.to(dtype=dtype) + elif spacing == "exp_2": + indices = 1.0 / theta ** (torch.arange(0, dim, 6, device=device) / dim) + indices = indices.to(dtype=dtype) + elif spacing == "linear": + indices = torch.linspace(start, end, dim // 6, device=device, dtype=dtype) + elif spacing == "sqrt": + indices = torch.linspace( + start**2, end**2, dim // 6, device=device, dtype=dtype + ).sqrt() + + indices = indices * math.pi / 2 + + if spacing == "exp_2": + freqs = ( + (indices * fractional_positions.unsqueeze(-1)) + .transpose(-1, -2) + .flatten(2) + ) + else: + freqs = ( + (indices * (fractional_positions.unsqueeze(-1) * 2 - 1)) + .transpose(-1, -2) + .flatten(2) + ) + + cos_freq = freqs.cos().repeat_interleave(2, dim=-1) + sin_freq = freqs.sin().repeat_interleave(2, dim=-1) + if dim % 6 != 0: + cos_padding = torch.ones_like(cos_freq[:, :, : dim % 6]) + sin_padding = torch.zeros_like(cos_freq[:, :, : dim % 6]) + cos_freq = torch.cat([cos_padding, cos_freq], dim=-1) + sin_freq = torch.cat([sin_padding, sin_freq], dim=-1) + return cos_freq.to(self.dtype), sin_freq.to(self.dtype) + + def load_state_dict( + self, + state_dict: Dict, + *args, + **kwargs, + ): + if any([key.startswith("model.diffusion_model.") for key in state_dict.keys()]): + state_dict = { + key.replace("model.diffusion_model.", ""): value + for key, value in state_dict.items() + if key.startswith("model.diffusion_model.") + } + super().load_state_dict(state_dict, *args, **kwargs) + + @classmethod + def from_pretrained( + cls, + pretrained_model_path: Optional[Union[str, os.PathLike]], + *args, + **kwargs, + ): + pretrained_model_path = Path(pretrained_model_path) + if pretrained_model_path.is_dir(): + config_path = pretrained_model_path / "transformer" / "config.json" + with open(config_path, "r") as f: + config = make_hashable_key(json.load(f)) + + assert config in diffusers_and_ours_config_mapping, ( + "Provided diffusers checkpoint config for transformer is not suppported. " + "We only support diffusers configs found in Lightricks/LTX-Video." 
+ ) + + config = diffusers_and_ours_config_mapping[config] + state_dict = {} + ckpt_paths = ( + pretrained_model_path + / "transformer" + / "diffusion_pytorch_model*.safetensors" + ) + dict_list = glob.glob(str(ckpt_paths)) + for dict_path in dict_list: + part_dict = {} + with safe_open(dict_path, framework="pt", device="cpu") as f: + for k in f.keys(): + part_dict[k] = f.get_tensor(k) + state_dict.update(part_dict) + + for key in list(state_dict.keys()): + new_key = key + for replace_key, rename_key in TRANSFORMER_KEYS_RENAME_DICT.items(): + new_key = new_key.replace(replace_key, rename_key) + state_dict[new_key] = state_dict.pop(key) + + with torch.device("meta"): + transformer = cls.from_config(config) + transformer.load_state_dict(state_dict, assign=True, strict=True) + elif pretrained_model_path.is_file() and str(pretrained_model_path).endswith( + ".safetensors" + ): + comfy_single_file_state_dict = {} + with safe_open(pretrained_model_path, framework="pt", device="cpu") as f: + metadata = f.metadata() + for k in f.keys(): + comfy_single_file_state_dict[k] = f.get_tensor(k) + configs = json.loads(metadata["config"]) + transformer_config = configs["transformer"] + with torch.device("meta"): + transformer = Transformer3DModel.from_config(transformer_config) + transformer.load_state_dict(comfy_single_file_state_dict, assign=True) + return transformer + + def forward( + self, + hidden_states: torch.Tensor, + indices_grid: torch.Tensor, + encoder_hidden_states: Optional[torch.Tensor] = None, + timestep: Optional[torch.LongTensor] = None, + class_labels: Optional[torch.LongTensor] = None, + cross_attention_kwargs: Dict[str, Any] = None, + attention_mask: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + skip_layer_mask: Optional[torch.Tensor] = None, + skip_layer_strategy: Optional[SkipLayerStrategy] = None, + return_dict: bool = True, + ): + """ + The [`Transformer2DModel`] forward method. + + Args: + hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous): + Input `hidden_states`. + indices_grid (`torch.LongTensor` of shape `(batch size, 3, num latent pixels)`): + encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*): + Conditional embeddings for cross attention layer. If not given, cross-attention defaults to + self-attention. + timestep ( `torch.LongTensor`, *optional*): + Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`. + class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): + Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in + `AdaLayerZeroNorm`. + cross_attention_kwargs ( `Dict[str, Any]`, *optional*): + A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under + `self.processor` in + [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). + attention_mask ( `torch.Tensor`, *optional*): + An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask + is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large + negative values to the attention scores corresponding to "discard" tokens. 
+ encoder_attention_mask ( `torch.Tensor`, *optional*): + Cross-attention mask applied to `encoder_hidden_states`. Two formats supported: + + * Mask `(batch, sequence_length)` True = keep, False = discard. + * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard. + + If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format + above. This bias will be added to the cross-attention scores. + skip_layer_mask ( `torch.Tensor`, *optional*): + A mask of shape `(num_layers, batch)` that indicates which layers to skip. `0` at position + `layer, batch_idx` indicates that the layer should be skipped for the corresponding batch index. + skip_layer_strategy ( `SkipLayerStrategy`, *optional*, defaults to `None`): + Controls which layers are skipped when calculating a perturbed latent for spatiotemporal guidance. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain + tuple. + + Returns: + If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a + `tuple` where the first element is the sample tensor. + """ + # for tpu attention offload 2d token masks are used. No need to transform. + if not self.use_tpu_flash_attention: + # ensure attention_mask is a bias, and give it a singleton query_tokens dimension. + # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward. + # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias. + # expects mask of shape: + # [batch, key_tokens] + # adds singleton query_tokens dimension: + # [batch, 1, key_tokens] + # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes: + # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn) + # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn) + if attention_mask is not None and attention_mask.ndim == 2: + # assume that mask is expressed as: + # (1 = keep, 0 = discard) + # convert mask into a bias that can be added to attention scores: + # (keep = +0, discard = -10000.0) + attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0 + attention_mask = attention_mask.unsqueeze(1) + + # convert encoder_attention_mask to a bias the same way we do for attention_mask + if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2: + encoder_attention_mask = ( + 1 - encoder_attention_mask.to(hidden_states.dtype) + ) * -10000.0 + encoder_attention_mask = encoder_attention_mask.unsqueeze(1) + + # 1. Input + hidden_states = self.patchify_proj(hidden_states) + + if self.timestep_scale_multiplier: + timestep = self.timestep_scale_multiplier * timestep + + freqs_cis = self.precompute_freqs_cis(indices_grid) + + batch_size = hidden_states.shape[0] + timestep, embedded_timestep = self.adaln_single( + timestep.flatten(), + {"resolution": None, "aspect_ratio": None}, + batch_size=batch_size, + hidden_dtype=hidden_states.dtype, + ) + # Second dimension is 1 or number of tokens (if timestep_per_token) + timestep = timestep.view(batch_size, -1, timestep.shape[-1]) + embedded_timestep = embedded_timestep.view( + batch_size, -1, embedded_timestep.shape[-1] + ) + + # 2. 
Blocks + if self.caption_projection is not None: + batch_size = hidden_states.shape[0] + encoder_hidden_states = self.caption_projection(encoder_hidden_states) + encoder_hidden_states = encoder_hidden_states.view( + batch_size, -1, hidden_states.shape[-1] + ) + + for block_idx, block in enumerate(self.transformer_blocks): + if self.training and self.gradient_checkpointing: + + def create_custom_forward(module, return_dict=None): + def custom_forward(*inputs): + if return_dict is not None: + return module(*inputs, return_dict=return_dict) + else: + return module(*inputs) + + return custom_forward + + ckpt_kwargs: Dict[str, Any] = ( + {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {} + ) + hidden_states = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + hidden_states, + freqs_cis, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + timestep, + cross_attention_kwargs, + class_labels, + ( + skip_layer_mask[block_idx] + if skip_layer_mask is not None + else None + ), + skip_layer_strategy, + **ckpt_kwargs, + ) + else: + hidden_states = block( + hidden_states, + freqs_cis=freqs_cis, + attention_mask=attention_mask, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + timestep=timestep, + cross_attention_kwargs=cross_attention_kwargs, + class_labels=class_labels, + skip_layer_mask=( + skip_layer_mask[block_idx] + if skip_layer_mask is not None + else None + ), + skip_layer_strategy=skip_layer_strategy, + ) + + # 3. Output + scale_shift_values = ( + self.scale_shift_table[None, None] + embedded_timestep[:, :, None] + ) + shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1] + hidden_states = self.norm_out(hidden_states) + # Modulation + hidden_states = hidden_states * (1 + scale) + shift + hidden_states = self.proj_out(hidden_states) + if not return_dict: + return (hidden_states,) + + return Transformer3DModelOutput(sample=hidden_states) diff --git a/ltx_video/pipelines/__init__.py b/ltx_video/pipelines/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/pipelines/crf_compressor.py b/ltx_video/pipelines/crf_compressor.py new file mode 100644 index 0000000000000000000000000000000000000000..9b9380afb7f92e0a2379c9db4cf5ce9f5a20942c --- /dev/null +++ b/ltx_video/pipelines/crf_compressor.py @@ -0,0 +1,50 @@ +import av +import torch +import io +import numpy as np + + +def _encode_single_frame(output_file, image_array: np.ndarray, crf): + container = av.open(output_file, "w", format="mp4") + try: + stream = container.add_stream( + "libx264", rate=1, options={"crf": str(crf), "preset": "veryfast"} + ) + stream.height = image_array.shape[0] + stream.width = image_array.shape[1] + av_frame = av.VideoFrame.from_ndarray(image_array, format="rgb24").reformat( + format="yuv420p" + ) + container.mux(stream.encode(av_frame)) + container.mux(stream.encode()) + finally: + container.close() + + +def _decode_single_frame(video_file): + container = av.open(video_file) + try: + stream = next(s for s in container.streams if s.type == "video") + frame = next(container.decode(stream)) + finally: + container.close() + return frame.to_ndarray(format="rgb24") + + +def compress(image: torch.Tensor, crf=29): + if crf == 0: + return image + + image_array = ( + (image[: (image.shape[0] // 2) * 2, : (image.shape[1] // 2) * 2] * 255.0) + .byte() + .cpu() + .numpy() + ) + with io.BytesIO() as output_file: + 
_encode_single_frame(output_file, image_array, crf) + video_bytes = output_file.getvalue() + with io.BytesIO(video_bytes) as video_file: + image_array = _decode_single_frame(video_file) + tensor = torch.tensor(image_array, dtype=image.dtype, device=image.device) / 255.0 + return tensor diff --git a/ltx_video/pipelines/pipeline_ltx_video.py b/ltx_video/pipelines/pipeline_ltx_video.py new file mode 100644 index 0000000000000000000000000000000000000000..081dfd6e652b61dc500fd333541032c8f5a78c4f --- /dev/null +++ b/ltx_video/pipelines/pipeline_ltx_video.py @@ -0,0 +1,2290 @@ +# Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py +import copy +import inspect +import math +import re +from contextlib import nullcontext +from dataclasses import dataclass +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +import torch +import torch.nn.functional as F +from diffusers.image_processor import VaeImageProcessor +from diffusers.models import AutoencoderKL +from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput +from diffusers.schedulers import DPMSolverMultistepScheduler +from diffusers.utils import deprecate, logging +from diffusers.utils.torch_utils import randn_tensor +from einops import rearrange +from transformers import ( + T5EncoderModel, + T5Tokenizer, + AutoModelForCausalLM, + AutoProcessor, + AutoTokenizer, +) + +from ltx_video.models.autoencoders.causal_video_autoencoder import ( + CausalVideoAutoencoder, +) +from ltx_video.models.autoencoders.vae_encode import ( + get_vae_size_scale_factor, + latent_to_pixel_coords, + vae_decode, + vae_encode, +) +from ltx_video.models.transformers.symmetric_patchifier import Patchifier +from ltx_video.models.transformers.transformer3d import Transformer3DModel +from ltx_video.schedulers.rf import TimestepShifter +from ltx_video.utils.skip_layer_strategy import SkipLayerStrategy +from ltx_video.utils.prompt_enhance_utils import generate_cinematic_prompt +from ltx_video.models.autoencoders.latent_upsampler import LatentUpsampler +from ltx_video.models.autoencoders.vae_encode import ( + un_normalize_latents, + normalize_latents, +) + + +logger = logging.get_logger(__name__) # pylint: disable=invalid-name + + +ASPECT_RATIO_1024_BIN = { + "0.25": [512.0, 2048.0], + "0.28": [512.0, 1856.0], + "0.32": [576.0, 1792.0], + "0.33": [576.0, 1728.0], + "0.35": [576.0, 1664.0], + "0.4": [640.0, 1600.0], + "0.42": [640.0, 1536.0], + "0.48": [704.0, 1472.0], + "0.5": [704.0, 1408.0], + "0.52": [704.0, 1344.0], + "0.57": [768.0, 1344.0], + "0.6": [768.0, 1280.0], + "0.68": [832.0, 1216.0], + "0.72": [832.0, 1152.0], + "0.78": [896.0, 1152.0], + "0.82": [896.0, 1088.0], + "0.88": [960.0, 1088.0], + "0.94": [960.0, 1024.0], + "1.0": [1024.0, 1024.0], + "1.07": [1024.0, 960.0], + "1.13": [1088.0, 960.0], + "1.21": [1088.0, 896.0], + "1.29": [1152.0, 896.0], + "1.38": [1152.0, 832.0], + "1.46": [1216.0, 832.0], + "1.67": [1280.0, 768.0], + "1.75": [1344.0, 768.0], + "2.0": [1408.0, 704.0], + "2.09": [1472.0, 704.0], + "2.4": [1536.0, 640.0], + "2.5": [1600.0, 640.0], + "3.0": [1728.0, 576.0], + "4.0": [2048.0, 512.0], +} + +ASPECT_RATIO_512_BIN = { + "0.25": [256.0, 1024.0], + "0.28": [256.0, 928.0], + "0.32": [288.0, 896.0], + "0.33": [288.0, 864.0], + "0.35": [288.0, 832.0], + "0.4": [320.0, 800.0], + "0.42": [320.0, 768.0], + "0.48": [352.0, 736.0], + "0.5": [352.0, 704.0], + "0.52": [352.0, 672.0], + "0.57": [384.0, 672.0], + "0.6": [384.0, 640.0], + 
"0.68": [416.0, 608.0], + "0.72": [416.0, 576.0], + "0.78": [448.0, 576.0], + "0.82": [448.0, 544.0], + "0.88": [480.0, 544.0], + "0.94": [480.0, 512.0], + "1.0": [512.0, 512.0], + "1.07": [512.0, 480.0], + "1.13": [544.0, 480.0], + "1.21": [544.0, 448.0], + "1.29": [576.0, 448.0], + "1.38": [576.0, 416.0], + "1.46": [608.0, 416.0], + "1.67": [640.0, 384.0], + "1.75": [672.0, 384.0], + "2.0": [704.0, 352.0], + "2.09": [736.0, 352.0], + "2.4": [768.0, 320.0], + "2.5": [800.0, 320.0], + "3.0": [864.0, 288.0], + "4.0": [1024.0, 256.0], +} + + +# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps +def retrieve_timesteps( + scheduler, + num_inference_steps: Optional[int] = None, + device: Optional[Union[str, torch.device]] = None, + timesteps: Optional[List[int]] = None, + skip_initial_inference_steps: int = 0, + skip_final_inference_steps: int = 0, + **kwargs, +): + """ + Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles + custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`. + + Args: + scheduler (`SchedulerMixin`): + The scheduler to get timesteps from. + num_inference_steps (`int`): + The number of diffusion steps used when generating samples with a pre-trained model. If used, + `timesteps` must be `None`. + device (`str` or `torch.device`, *optional*): + The device to which the timesteps should be moved to. If `None`, the timesteps are not moved. + timesteps (`List[int]`, *optional*): + Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default + timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps` + must be `None`. + max_timestep ('float', *optional*, defaults to 1.0): + The initial noising level for image-to-image/video-to-video. The list if timestamps will be + truncated to start with a timestamp greater or equal to this. + + Returns: + `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the + second element is the number of inference steps. + """ + if timesteps is not None: + accepts_timesteps = "timesteps" in set( + inspect.signature(scheduler.set_timesteps).parameters.keys() + ) + if not accepts_timesteps: + raise ValueError( + f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom" + f" timestep schedules. Please check whether you are using the correct scheduler." + ) + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + timesteps = scheduler.timesteps + num_inference_steps = len(timesteps) + else: + scheduler.set_timesteps(num_inference_steps, device=device, **kwargs) + timesteps = scheduler.timesteps + + if ( + skip_initial_inference_steps < 0 + or skip_final_inference_steps < 0 + or skip_initial_inference_steps + skip_final_inference_steps + >= num_inference_steps + ): + raise ValueError( + "invalid skip inference step values: must be non-negative and the sum of skip_initial_inference_steps and skip_final_inference_steps must be less than the number of inference steps" + ) + + timesteps = timesteps[ + skip_initial_inference_steps : len(timesteps) - skip_final_inference_steps + ] + scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs) + num_inference_steps = len(timesteps) + + return timesteps, num_inference_steps + + +@dataclass +class ConditioningItem: + """ + Defines a single frame-conditioning item - a single frame or a sequence of frames. 
+ + Attributes: + media_item (torch.Tensor): shape=(b, 3, f, h, w). The media item to condition on. + media_frame_number (int): The start-frame number of the media item in the generated video. + conditioning_strength (float): The strength of the conditioning (1.0 = full conditioning). + media_x (Optional[int]): Optional left x coordinate of the media item in the generated frame. + media_y (Optional[int]): Optional top y coordinate of the media item in the generated frame. + """ + + media_item: torch.Tensor + media_frame_number: int + conditioning_strength: float + media_x: Optional[int] = None + media_y: Optional[int] = None + + + +@dataclass +class LatentConditioningItem: + latent_tensor: torch.Tensor + media_frame_number: int + conditioning_strength: float + + +class LTXVideoPipeline(DiffusionPipeline): + r""" + Pipeline for text-to-image generation using LTX-Video. + + This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the + library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) + + Args: + vae ([`AutoencoderKL`]): + Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. + text_encoder ([`T5EncoderModel`]): + Frozen text-encoder. This uses + [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the + [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant. + tokenizer (`T5Tokenizer`): + Tokenizer of class + [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer). + transformer ([`Transformer2DModel`]): + A text conditioned `Transformer2DModel` to denoise the encoded image latents. + scheduler ([`SchedulerMixin`]): + A scheduler to be used in combination with `transformer` to denoise the encoded image latents. 
+ """ + + bad_punct_regex = re.compile( + r"[" + + "#®•©™&@·º½¾¿¡§~" + + r"\)" + + r"\(" + + r"\]" + + r"\[" + + r"\}" + + r"\{" + + r"\|" + + "\\" + + r"\/" + + r"\*" + + r"]{1,}" + ) # noqa + + _optional_components = [ + "tokenizer", + "text_encoder", + "prompt_enhancer_image_caption_model", + "prompt_enhancer_image_caption_processor", + "prompt_enhancer_llm_model", + "prompt_enhancer_llm_tokenizer", + ] + model_cpu_offload_seq = "prompt_enhancer_image_caption_model->prompt_enhancer_llm_model->text_encoder->transformer->vae" + + def __init__( + self, + tokenizer: T5Tokenizer, + text_encoder: T5EncoderModel, + vae: AutoencoderKL, + transformer: Transformer3DModel, + scheduler: DPMSolverMultistepScheduler, + patchifier: Patchifier, + prompt_enhancer_image_caption_model: AutoModelForCausalLM, + prompt_enhancer_image_caption_processor: AutoProcessor, + prompt_enhancer_llm_model: AutoModelForCausalLM, + prompt_enhancer_llm_tokenizer: AutoTokenizer, + allowed_inference_steps: Optional[List[float]] = None, + ): + super().__init__() + + self.register_modules( + tokenizer=tokenizer, + text_encoder=text_encoder, + vae=vae, + transformer=transformer, + scheduler=scheduler, + patchifier=patchifier, + prompt_enhancer_image_caption_model=prompt_enhancer_image_caption_model, + prompt_enhancer_image_caption_processor=prompt_enhancer_image_caption_processor, + prompt_enhancer_llm_model=prompt_enhancer_llm_model, + prompt_enhancer_llm_tokenizer=prompt_enhancer_llm_tokenizer, + ) + + self.video_scale_factor, self.vae_scale_factor, _ = get_vae_size_scale_factor( + self.vae + ) + self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor) + + self.allowed_inference_steps = allowed_inference_steps + + def mask_text_embeddings(self, emb, mask): + if emb.shape[0] == 1: + keep_index = mask.sum().item() + return emb[:, :, :keep_index, :], keep_index + else: + masked_feature = emb * mask[:, None, :, None] + return masked_feature, emb.shape[2] + + # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt + def encode_prompt( + self, + prompt: Union[str, List[str]], + do_classifier_free_guidance: bool = True, + negative_prompt: str = "", + num_images_per_prompt: int = 1, + device: Optional[torch.device] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_attention_mask: Optional[torch.FloatTensor] = None, + negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, + text_encoder_max_tokens: int = 256, + **kwargs, + ): + r""" + Encodes the prompt into text encoder hidden states. + + Args: + prompt (`str` or `List[str]`, *optional*): + prompt to be encoded + negative_prompt (`str` or `List[str]`, *optional*): + The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds` + instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For + This should be "". + do_classifier_free_guidance (`bool`, *optional*, defaults to `True`): + whether to use classifier free guidance or not + num_images_per_prompt (`int`, *optional*, defaults to 1): + number of images that should be generated per prompt + device: (`torch.device`, *optional*): + torch device to place the resulting embeddings on + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. 
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. + """ + + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + if device is None: + device = self._execution_device + + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + # See Section 3.1. of the paper. + max_length = ( + text_encoder_max_tokens # TPU supports only lengths multiple of 128 + ) + if prompt_embeds is None: + assert ( + self.text_encoder is not None + ), "You should provide either prompt_embeds or self.text_encoder should not be None," + text_enc_device = next(self.text_encoder.parameters()).device + prompt = self._text_preprocessing(prompt) + text_inputs = self.tokenizer( + prompt, + padding="max_length", + max_length=max_length, + truncation=True, + add_special_tokens=True, + return_tensors="pt", + ) + text_input_ids = text_inputs.input_ids + untruncated_ids = self.tokenizer( + prompt, padding="longest", return_tensors="pt" + ).input_ids + + if untruncated_ids.shape[-1] >= text_input_ids.shape[ + -1 + ] and not torch.equal(text_input_ids, untruncated_ids): + removed_text = self.tokenizer.batch_decode( + untruncated_ids[:, max_length - 1 : -1] + ) + logger.warning( + "The following part of your input was truncated because CLIP can only handle sequences up to" + f" {max_length} tokens: {removed_text}" + ) + + prompt_attention_mask = text_inputs.attention_mask + prompt_attention_mask = prompt_attention_mask.to(text_enc_device) + prompt_attention_mask = prompt_attention_mask.to(device) + + prompt_embeds = self.text_encoder( + text_input_ids.to(text_enc_device), attention_mask=prompt_attention_mask + ) + prompt_embeds = prompt_embeds[0] + + if self.text_encoder is not None: + dtype = self.text_encoder.dtype + elif self.transformer is not None: + dtype = self.transformer.dtype + else: + dtype = None + + prompt_embeds = prompt_embeds.to(dtype=dtype, device=device) + + bs_embed, seq_len, _ = prompt_embeds.shape + # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method + prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1) + prompt_embeds = prompt_embeds.view( + bs_embed * num_images_per_prompt, seq_len, -1 + ) + prompt_attention_mask = prompt_attention_mask.repeat(1, num_images_per_prompt) + prompt_attention_mask = prompt_attention_mask.view( + bs_embed * num_images_per_prompt, -1 + ) + + # get unconditional embeddings for classifier free guidance + if do_classifier_free_guidance and negative_prompt_embeds is None: + uncond_tokens = self._text_preprocessing(negative_prompt) + uncond_tokens = uncond_tokens * batch_size + max_length = prompt_embeds.shape[1] + uncond_input = self.tokenizer( + uncond_tokens, + padding="max_length", + max_length=max_length, + truncation=True, + return_attention_mask=True, + add_special_tokens=True, + return_tensors="pt", + ) + negative_prompt_attention_mask = uncond_input.attention_mask + negative_prompt_attention_mask = negative_prompt_attention_mask.to( + text_enc_device + ) + + negative_prompt_embeds = self.text_encoder( + uncond_input.input_ids.to(text_enc_device), + 
attention_mask=negative_prompt_attention_mask, + ) + negative_prompt_embeds = negative_prompt_embeds[0] + + if do_classifier_free_guidance: + # duplicate unconditional embeddings for each generation per prompt, using mps friendly method + seq_len = negative_prompt_embeds.shape[1] + + negative_prompt_embeds = negative_prompt_embeds.to( + dtype=dtype, device=device + ) + + negative_prompt_embeds = negative_prompt_embeds.repeat( + 1, num_images_per_prompt, 1 + ) + negative_prompt_embeds = negative_prompt_embeds.view( + batch_size * num_images_per_prompt, seq_len, -1 + ) + + negative_prompt_attention_mask = negative_prompt_attention_mask.repeat( + 1, num_images_per_prompt + ) + negative_prompt_attention_mask = negative_prompt_attention_mask.view( + bs_embed * num_images_per_prompt, -1 + ) + else: + negative_prompt_embeds = None + negative_prompt_attention_mask = None + + return ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs + def prepare_extra_step_kwargs(self, generator, eta): + # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature + # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. + # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 + # and should be between [0, 1] + + accepts_eta = "eta" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + extra_step_kwargs = {} + if accepts_eta: + extra_step_kwargs["eta"] = eta + + # check if the scheduler accepts generator + accepts_generator = "generator" in set( + inspect.signature(self.scheduler.step).parameters.keys() + ) + if accepts_generator: + extra_step_kwargs["generator"] = generator + return extra_step_kwargs + + def check_inputs( + self, + prompt, + height, + width, + negative_prompt, + prompt_embeds=None, + negative_prompt_embeds=None, + prompt_attention_mask=None, + negative_prompt_attention_mask=None, + enhance_prompt=False, + ): + if height % 8 != 0 or width % 8 != 0: + raise ValueError( + f"`height` and `width` have to be divisible by 8 but are {height} and {width}." + ) + + if prompt is not None and prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to" + " only forward one of the two." + ) + elif prompt is None and prompt_embeds is None: + raise ValueError( + "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined." + ) + elif prompt is not None and ( + not isinstance(prompt, str) and not isinstance(prompt, list) + ): + raise ValueError( + f"`prompt` has to be of type `str` or `list` but is {type(prompt)}" + ) + + if prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if negative_prompt is not None and negative_prompt_embeds is not None: + raise ValueError( + f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:" + f" {negative_prompt_embeds}. Please make sure to only forward one of the two." + ) + + if prompt_embeds is not None and prompt_attention_mask is None: + raise ValueError( + "Must provide `prompt_attention_mask` when specifying `prompt_embeds`." 
+ ) + + if ( + negative_prompt_embeds is not None + and negative_prompt_attention_mask is None + ): + raise ValueError( + "Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`." + ) + + if prompt_embeds is not None and negative_prompt_embeds is not None: + if prompt_embeds.shape != negative_prompt_embeds.shape: + raise ValueError( + "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but" + f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`" + f" {negative_prompt_embeds.shape}." + ) + if prompt_attention_mask.shape != negative_prompt_attention_mask.shape: + raise ValueError( + "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but" + f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`" + f" {negative_prompt_attention_mask.shape}." + ) + + if enhance_prompt: + assert ( + self.prompt_enhancer_image_caption_model is not None + ), "Image caption model must be initialized if enhance_prompt is True" + assert ( + self.prompt_enhancer_image_caption_processor is not None + ), "Image caption processor must be initialized if enhance_prompt is True" + assert ( + self.prompt_enhancer_llm_model is not None + ), "Text prompt enhancer model must be initialized if enhance_prompt is True" + assert ( + self.prompt_enhancer_llm_tokenizer is not None + ), "Text prompt enhancer tokenizer must be initialized if enhance_prompt is True" + + def _text_preprocessing(self, text): + if not isinstance(text, (tuple, list)): + text = [text] + + def process(text: str): + text = text.strip() + return text + + return [process(t) for t in text] + + @staticmethod + def add_noise_to_image_conditioning_latents( + t: float, + init_latents: torch.Tensor, + latents: torch.Tensor, + noise_scale: float, + conditioning_mask: torch.Tensor, + generator, + eps=1e-6, + ): + """ + Add timestep-dependent noise to the hard-conditioning latents. + This helps with motion continuity, especially when conditioned on a single frame. + """ + noise = randn_tensor( + latents.shape, + generator=generator, + device=latents.device, + dtype=latents.dtype, + ) + # Add noise only to hard-conditioning latents (conditioning_mask = 1.0) + need_to_noise = (conditioning_mask > 1.0 - eps).unsqueeze(-1) + noised_latents = init_latents + noise_scale * noise * (t**2) + latents = torch.where(need_to_noise, noised_latents, latents) + return latents + + # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents + def prepare_latents( + self, + latents: torch.Tensor | None, + media_items: torch.Tensor | None, + timestep: float, + latent_shape: torch.Size | Tuple[Any, ...], + dtype: torch.dtype, + device: torch.device, + generator: torch.Generator | List[torch.Generator], + vae_per_channel_normalize: bool = True, + ): + """ + Prepare the initial latent tensor to be denoised. + The latents are either pure noise or a noised version of the encoded media items. + Args: + latents (`torch.FloatTensor` or `None`): + The latents to use (provided by the user) or `None` to create new latents. + media_items (`torch.FloatTensor` or `None`): + An image or video to be updated using img2img or vid2vid. The media item is encoded and noised. + timestep (`float`): + The timestep to noise the encoded media_items to. + latent_shape (`torch.Size`): + The target latent shape. + dtype (`torch.dtype`): + The target dtype. 
+ device (`torch.device`): + The target device. + generator (`torch.Generator` or `List[torch.Generator]`): + Generator(s) to be used for the noising process. + vae_per_channel_normalize ('bool'): + When encoding the media_items, whether to normalize the latents per-channel. + Returns: + `torch.FloatTensor`: The latents to be used for the denoising process. This is a tensor of shape + (batch_size, num_channels, height, width). + """ + if isinstance(generator, list) and len(generator) != latent_shape[0]: + raise ValueError( + f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" + f" size of {latent_shape[0]}. Make sure the batch size matches the length of the generators." + ) + + # Initialize the latents with the given latents or encoded media item, if provided + assert ( + latents is None or media_items is None + ), "Cannot provide both latents and media_items. Please provide only one of the two." + + assert ( + latents is None and media_items is None or timestep < 1.0 + ), "Input media_item or latents are provided, but they will be replaced with noise." + + if media_items is not None: + latents = vae_encode( + media_items.to(dtype=self.vae.dtype, device=self.vae.device), + self.vae, + vae_per_channel_normalize=vae_per_channel_normalize, + ) + if latents is not None: + assert ( + latents.shape == latent_shape + ), f"Latents have to be of shape {latent_shape} but are {latents.shape}." + latents = latents.to(device=device, dtype=dtype) + + # For backward compatibility, generate in the "patchified" shape and rearrange + b, c, f, h, w = latent_shape + noise = randn_tensor( + (b, f * h * w, c), generator=generator, device=device, dtype=dtype + ) + noise = rearrange(noise, "b (f h w) c -> b c f h w", f=f, h=h, w=w) + + # scale the initial noise by the standard deviation required by the scheduler + noise = noise * self.scheduler.init_noise_sigma + + if latents is None: + latents = noise + else: + # Noise the latents to the required (first) timestep + latents = timestep * noise + (1 - timestep) * latents + + return latents + + @staticmethod + def classify_height_width_bin( + height: int, width: int, ratios: dict + ) -> Tuple[int, int]: + """Returns binned height and width.""" + ar = float(height / width) + closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar)) + default_hw = ratios[closest_ratio] + return int(default_hw[0]), int(default_hw[1]) + + @staticmethod + def resize_and_crop_tensor( + samples: torch.Tensor, new_width: int, new_height: int + ) -> torch.Tensor: + n_frames, orig_height, orig_width = samples.shape[-3:] + + # Check if resizing is needed + if orig_height != new_height or orig_width != new_width: + ratio = max(new_height / orig_height, new_width / orig_width) + resized_width = int(orig_width * ratio) + resized_height = int(orig_height * ratio) + + # Resize + samples = LTXVideoPipeline.resize_tensor( + samples, resized_height, resized_width + ) + + # Center Crop + start_x = (resized_width - new_width) // 2 + end_x = start_x + new_width + start_y = (resized_height - new_height) // 2 + end_y = start_y + new_height + samples = samples[..., start_y:end_y, start_x:end_x] + + return samples + + @staticmethod + def resize_tensor(media_items, height, width): + n_frames = media_items.shape[2] + if media_items.shape[-2:] != (height, width): + media_items = rearrange(media_items, "b c n h w -> (b n) c h w") + media_items = F.interpolate( + media_items, + size=(height, width), + mode="bilinear", + align_corners=False, + ) + 
media_items = rearrange(media_items, "(b n) c h w -> b c n h w", n=n_frames) + return media_items + + @torch.no_grad() + def __call__( + self, + height: int, + width: int, + num_frames: int, + frame_rate: float, + prompt: Union[str, List[str]] = None, + negative_prompt: str = "", + num_inference_steps: int = 20, + skip_initial_inference_steps: int = 0, + skip_final_inference_steps: int = 0, + timesteps: List[int] = None, + guidance_scale: Union[float, List[float]] = 4.5, + cfg_star_rescale: bool = False, + skip_layer_strategy: Optional[SkipLayerStrategy] = None, + skip_block_list: Optional[Union[List[List[int]], List[int]]] = None, + stg_scale: Union[float, List[float]] = 1.0, + rescaling_scale: Union[float, List[float]] = 0.7, + guidance_timesteps: Optional[List[int]] = None, + num_images_per_prompt: Optional[int] = 1, + eta: float = 0.0, + generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, + latents: Optional[torch.FloatTensor] = None, + prompt_embeds: Optional[torch.FloatTensor] = None, + prompt_attention_mask: Optional[torch.FloatTensor] = None, + negative_prompt_embeds: Optional[torch.FloatTensor] = None, + negative_prompt_attention_mask: Optional[torch.FloatTensor] = None, + output_type: Optional[str] = "pil", + return_dict: bool = True, + callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None, + conditioning_items: Optional[List[ConditioningItem]] = None, + decode_timestep: Union[List[float], float] = 0.0, + decode_noise_scale: Optional[List[float]] = None, + mixed_precision: bool = False, + offload_to_cpu: bool = False, + enhance_prompt: bool = False, + text_encoder_max_tokens: int = 256, + stochastic_sampling: bool = False, + media_items: Optional[torch.Tensor] = None, + tone_map_compression_ratio: float = 0.0, + **kwargs, + ) -> Union[ImagePipelineOutput, Tuple]: + """ + Function invoked when calling the pipeline for generation. + + Args: + prompt (`str` or `List[str]`, *optional*): + The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. + instead. + negative_prompt (`str` or `List[str]`, *optional*): + The prompt or prompts not to guide the image generation. If not defined, one has to pass + `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is + less than `1`). + num_inference_steps (`int`, *optional*, defaults to 100): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. If `timesteps` is provided, this parameter is ignored. + skip_initial_inference_steps (`int`, *optional*, defaults to 0): + The number of initial timesteps to skip. After calculating the timesteps, this number of timesteps will + be removed from the beginning of the timesteps list. Meaning the highest-timesteps values will not run. + skip_final_inference_steps (`int`, *optional*, defaults to 0): + The number of final timesteps to skip. After calculating the timesteps, this number of timesteps will + be removed from the end of the timesteps list. Meaning the lowest-timesteps values will not run. + timesteps (`List[int]`, *optional*): + Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps` + timesteps are used. Must be in descending order. + guidance_scale (`float`, *optional*, defaults to 4.5): + Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). + `guidance_scale` is defined as `w` of equation 2. 
of [Imagen + Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > + 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, + usually at the expense of lower image quality. + cfg_star_rescale (`bool`, *optional*, defaults to `False`): + If set to `True`, applies the CFG star rescale. Scales the negative prediction according to dot + product between positive and negative. + num_images_per_prompt (`int`, *optional*, defaults to 1): + The number of images to generate per prompt. + height (`int`, *optional*, defaults to self.unet.config.sample_size): + The height in pixels of the generated image. + width (`int`, *optional*, defaults to self.unet.config.sample_size): + The width in pixels of the generated image. + eta (`float`, *optional*, defaults to 0.0): + Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to + [`schedulers.DDIMScheduler`], will be ignored for others. + generator (`torch.Generator` or `List[torch.Generator]`, *optional*): + One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) + to make generation deterministic. + latents (`torch.FloatTensor`, *optional*): + Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image + generation. Can be used to tweak the same generation with different prompts. If not provided, a latents + tensor will ge generated by sampling using the supplied random `generator`. + prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not + provided, text embeddings will be generated from `prompt` input argument. + prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings. + negative_prompt_embeds (`torch.FloatTensor`, *optional*): + Pre-generated negative text embeddings. This negative prompt should be "". If not + provided, negative_prompt_embeds will be generated from `negative_prompt` input argument. + negative_prompt_attention_mask (`torch.FloatTensor`, *optional*): + Pre-generated attention mask for negative text embeddings. + output_type (`str`, *optional*, defaults to `"pil"`): + The output format of the generate image. Choose between + [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. + return_dict (`bool`, *optional*, defaults to `True`): + Whether to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. + callback_on_step_end (`Callable`, *optional*): + A function that calls at the end of each denoising steps during the inference. The function is called + with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, + callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by + `callback_on_step_end_tensor_inputs`. + use_resolution_binning (`bool` defaults to `True`): + If set to `True`, the requested height and width are first mapped to the closest resolutions using + `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to + the requested resolution. Useful for generating non-square images. + enhance_prompt (`bool`, *optional*, defaults to `False`): + If set to `True`, the prompt is enhanced using a LLM model. 
+ text_encoder_max_tokens (`int`, *optional*, defaults to `256`): + The maximum number of tokens to use for the text encoder. + stochastic_sampling (`bool`, *optional*, defaults to `False`): + If set to `True`, the sampling is stochastic. If set to `False`, the sampling is deterministic. + media_items ('torch.Tensor', *optional*): + The input media item used for image-to-image / video-to-video. + tone_map_compression_ratio: compression ratio for tone mapping, defaults to 0.0. + If set to 0.0, no tone mapping is applied. If set to 1.0 - full compression is applied. + Examples: + + Returns: + [`~pipelines.ImagePipelineOutput`] or `tuple`: + If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is + returned where the first element is a list with the generated images + """ + if "mask_feature" in kwargs: + deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version." + deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False) + + is_video = kwargs.get("is_video", False) + self.check_inputs( + prompt, + height, + width, + negative_prompt, + prompt_embeds, + negative_prompt_embeds, + prompt_attention_mask, + negative_prompt_attention_mask, + ) + + # 2. Default height and width to transformer + if prompt is not None and isinstance(prompt, str): + batch_size = 1 + elif prompt is not None and isinstance(prompt, list): + batch_size = len(prompt) + else: + batch_size = prompt_embeds.shape[0] + + device = self._execution_device + + self.video_scale_factor = self.video_scale_factor if is_video else 1 + vae_per_channel_normalize = kwargs.get("vae_per_channel_normalize", True) + image_cond_noise_scale = kwargs.get("image_cond_noise_scale", 0.0) + + latent_height = height // self.vae_scale_factor + latent_width = width // self.vae_scale_factor + latent_num_frames = num_frames // self.video_scale_factor + if isinstance(self.vae, CausalVideoAutoencoder) and is_video: + latent_num_frames += 1 + latent_shape = ( + batch_size * num_images_per_prompt, + self.transformer.config.in_channels, + latent_num_frames, + latent_height, + latent_width, + ) + + # Prepare the list of denoising time-steps + + retrieve_timesteps_kwargs = {} + if isinstance(self.scheduler, TimestepShifter): + retrieve_timesteps_kwargs["samples_shape"] = latent_shape + + assert ( + skip_initial_inference_steps == 0 + or latents is not None + or media_items is not None + ), ( + f"skip_initial_inference_steps ({skip_initial_inference_steps}) is used for image-to-image/video-to-video - " + "media_item or latents should be provided." + ) + + timesteps, num_inference_steps = retrieve_timesteps( + self.scheduler, + num_inference_steps, + device, + timesteps, + skip_initial_inference_steps=skip_initial_inference_steps, + skip_final_inference_steps=skip_final_inference_steps, + **retrieve_timesteps_kwargs, + ) + + if self.allowed_inference_steps is not None: + for timestep in [round(x, 4) for x in timesteps.tolist()]: + assert ( + timestep in self.allowed_inference_steps + ), f"Invalid inference timestep {timestep}. Allowed timesteps are {self.allowed_inference_steps}." 
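+
+        # Added note (illustrative, not from the upstream LTX-Video source): when
+        # `guidance_timesteps` is provided, each denoising timestep is mapped below to
+        # the first entry of `guidance_timesteps` that is <= the current timestep
+        # (falling back to the last entry). For example, with
+        # guidance_timesteps = [1.0, 0.5] and timesteps = [1.0, 0.8, 0.5, 0.2],
+        # guidance_mapping becomes [0, 1, 1, 1], so list-valued guidance_scale,
+        # stg_scale and rescaling_scale of length 2 expand to one value per step.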
+
+        if guidance_timesteps:
+            guidance_mapping = []
+            for timestep in timesteps:
+                indices = [
+                    i for i, val in enumerate(guidance_timesteps) if val <= timestep
+                ]
+                # assert len(indices) > 0, f"No guidance timestep found for {timestep}"
+                guidance_mapping.append(
+                    indices[0] if len(indices) > 0 else (len(guidance_timesteps) - 1)
+                )
+
+        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
+        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+        # corresponds to doing no classifier free guidance.
+        if not isinstance(guidance_scale, List):
+            guidance_scale = [guidance_scale] * len(timesteps)
+        else:
+            guidance_scale = [
+                guidance_scale[guidance_mapping[i]] for i in range(len(timesteps))
+            ]
+
+        if not isinstance(stg_scale, List):
+            stg_scale = [stg_scale] * len(timesteps)
+        else:
+            stg_scale = [stg_scale[guidance_mapping[i]] for i in range(len(timesteps))]
+
+        if not isinstance(rescaling_scale, List):
+            rescaling_scale = [rescaling_scale] * len(timesteps)
+        else:
+            rescaling_scale = [
+                rescaling_scale[guidance_mapping[i]] for i in range(len(timesteps))
+            ]
+
+        # Normalize skip_block_list to always be None or a list of lists matching timesteps
+        if skip_block_list is not None:
+            # Convert single list to list of lists if needed
+            if len(skip_block_list) == 0 or not isinstance(skip_block_list[0], list):
+                skip_block_list = [skip_block_list] * len(timesteps)
+            else:
+                new_skip_block_list = []
+                for i, timestep in enumerate(timesteps):
+                    new_skip_block_list.append(skip_block_list[guidance_mapping[i]])
+                skip_block_list = new_skip_block_list
+
+        if enhance_prompt:
+            self.prompt_enhancer_image_caption_model = (
+                self.prompt_enhancer_image_caption_model.to(self._execution_device)
+            )
+            self.prompt_enhancer_llm_model = self.prompt_enhancer_llm_model.to(
+                self._execution_device
+            )
+
+            prompt = generate_cinematic_prompt(
+                self.prompt_enhancer_image_caption_model,
+                self.prompt_enhancer_image_caption_processor,
+                self.prompt_enhancer_llm_model,
+                self.prompt_enhancer_llm_tokenizer,
+                prompt,
+                conditioning_items,
+                max_new_tokens=text_encoder_max_tokens,
+            )
+
+            # --- [PROMPT-ENHANCER DEBUG TAP] ---
+            print("--- [ASSISTANT DIRECTOR LOG (PROMPT ENHANCER)] ---")
+            print("Original prompt from the Maestro:", kwargs.get("original_prompt_for_logging", "N/A"))  # We need to pass this in
+            print("FINAL ENHANCED PROMPT (sent to LTX):", prompt)
+            print("--- [END OF ASSISTANT DIRECTOR LOG] ---")
+            # --- [END OF DEBUG TAP] ---
+
+
+        # 3.
Encode input prompt + if self.text_encoder is not None: + self.text_encoder = self.text_encoder.to(self._execution_device) + + ( + prompt_embeds, + prompt_attention_mask, + negative_prompt_embeds, + negative_prompt_attention_mask, + ) = self.encode_prompt( + prompt, + True, + negative_prompt=negative_prompt, + num_images_per_prompt=num_images_per_prompt, + device=device, + prompt_embeds=prompt_embeds, + negative_prompt_embeds=negative_prompt_embeds, + prompt_attention_mask=prompt_attention_mask, + negative_prompt_attention_mask=negative_prompt_attention_mask, + text_encoder_max_tokens=text_encoder_max_tokens, + ) + + if offload_to_cpu and self.text_encoder is not None: + self.text_encoder = self.text_encoder.cpu() + + self.transformer = self.transformer.to(self._execution_device) + + prompt_embeds_batch = prompt_embeds + prompt_attention_mask_batch = prompt_attention_mask + negative_prompt_embeds = ( + torch.zeros_like(prompt_embeds) + if negative_prompt_embeds is None + else negative_prompt_embeds + ) + negative_prompt_attention_mask = ( + torch.zeros_like(prompt_attention_mask) + if negative_prompt_attention_mask is None + else negative_prompt_attention_mask + ) + + prompt_embeds_batch = torch.cat( + [negative_prompt_embeds, prompt_embeds, prompt_embeds], dim=0 + ) + prompt_attention_mask_batch = torch.cat( + [ + negative_prompt_attention_mask, + prompt_attention_mask, + prompt_attention_mask, + ], + dim=0, + ) + # 4. Prepare the initial latents using the provided media and conditioning items + + # Prepare the initial latents tensor, shape = (b, c, f, h, w) + latents = self.prepare_latents( + latents=latents, + media_items=media_items, + timestep=timesteps[0], + latent_shape=latent_shape, + dtype=prompt_embeds.dtype, + device=device, + generator=generator, + vae_per_channel_normalize=vae_per_channel_normalize, + ) + + # Update the latents with the conditioning items and patchify them into (b, n, c) + latents, pixel_coords, conditioning_mask, num_cond_latents = ( + self.prepare_conditioning( + conditioning_items=conditioning_items, + init_latents=latents, + num_frames=num_frames, + height=height, + width=width, + vae_per_channel_normalize=vae_per_channel_normalize, + generator=generator, + ) + ) + init_latents = latents.clone() # Used for image_cond_noise_update + + # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline + extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) + + # 7. Denoising loop + num_warmup_steps = max( + len(timesteps) - num_inference_steps * self.scheduler.order, 0 + ) + + orig_conditioning_mask = conditioning_mask + + # Befor compiling this code please be aware: + # This code might generate different input shapes if some timesteps have no STG or CFG. + # This means that the codes might need to be compiled mutliple times. + # To avoid that, use the same STG and CFG values for all timesteps. 
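+        # Reviewer note (clarifying comment, not part of the original patch): the embeddings
+        # above are stacked as [negative | positive | positive] along the batch dimension.
+        # The `indices` slice computed at the top of the loop below picks out the rows needed
+        # for the active guidance modes at each step:
+        #   CFG + STG   -> rows [0, 3 * batch_size)  (uncond, cond, cond for the perturbed pass)
+        #   CFG only    -> rows [0, 2 * batch_size)  (uncond, cond)
+        #   STG only    -> rows [batch_size, 3 * batch_size)
+        #   no guidance -> rows [batch_size, 2 * batch_size)  (cond only)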
+ + with self.progress_bar(total=num_inference_steps) as progress_bar: + for i, t in enumerate(timesteps): + do_classifier_free_guidance = guidance_scale[i] > 1.0 + do_spatio_temporal_guidance = stg_scale[i] > 0 + do_rescaling = rescaling_scale[i] != 1.0 + + num_conds = 1 + if do_classifier_free_guidance: + num_conds += 1 + if do_spatio_temporal_guidance: + num_conds += 1 + + if do_classifier_free_guidance and do_spatio_temporal_guidance: + indices = slice(batch_size * 0, batch_size * 3) + elif do_classifier_free_guidance: + indices = slice(batch_size * 0, batch_size * 2) + elif do_spatio_temporal_guidance: + indices = slice(batch_size * 1, batch_size * 3) + else: + indices = slice(batch_size * 1, batch_size * 2) + + # Prepare skip layer masks + skip_layer_mask: Optional[torch.Tensor] = None + if do_spatio_temporal_guidance: + if skip_block_list is not None: + skip_layer_mask = self.transformer.create_skip_layer_mask( + batch_size, num_conds, num_conds - 1, skip_block_list[i] + ) + + batch_pixel_coords = torch.cat([pixel_coords] * num_conds) + conditioning_mask = orig_conditioning_mask + if conditioning_mask is not None and is_video: + assert num_images_per_prompt == 1 + conditioning_mask = torch.cat([conditioning_mask] * num_conds) + fractional_coords = batch_pixel_coords.to(torch.float32) + fractional_coords[:, 0] = fractional_coords[:, 0] * (1.0 / frame_rate) + + if conditioning_mask is not None and image_cond_noise_scale > 0.0: + latents = self.add_noise_to_image_conditioning_latents( + t, + init_latents, + latents, + image_cond_noise_scale, + orig_conditioning_mask, + generator, + ) + + latent_model_input = ( + torch.cat([latents] * num_conds) if num_conds > 1 else latents + ) + latent_model_input = self.scheduler.scale_model_input( + latent_model_input, t + ) + + current_timestep = t + if not torch.is_tensor(current_timestep): + # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can + # This would be a good case for the `match` statement (Python 3.10+) + is_mps = latent_model_input.device.type == "mps" + if isinstance(current_timestep, float): + dtype = torch.float32 if is_mps else torch.float64 + else: + dtype = torch.int32 if is_mps else torch.int64 + current_timestep = torch.tensor( + [current_timestep], + dtype=dtype, + device=latent_model_input.device, + ) + elif len(current_timestep.shape) == 0: + current_timestep = current_timestep[None].to( + latent_model_input.device + ) + # broadcast to batch dimension in a way that's compatible with ONNX/Core ML + current_timestep = current_timestep.expand( + latent_model_input.shape[0] + ).unsqueeze(-1) + + if conditioning_mask is not None: + # Conditioning latents have an initial timestep and noising level of (1.0 - conditioning_mask) + # and will start to be denoised when the current timestep is lower than their conditioning timestep. 
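+                    # Reviewer note (worked example, not part of the original patch): a token
+                    # conditioned with strength 0.9 has conditioning_mask == 0.9, so the line
+                    # below clamps its effective timestep to at most 0.1 and it is only lightly
+                    # re-noised, while a hard-conditioned token (mask == 1.0) is pinned to
+                    # timestep 0.0 and is never denoised (see `denoising_step`).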
+ current_timestep = torch.min( + current_timestep, 1.0 - conditioning_mask + ) + + # Choose the appropriate context manager based on `mixed_precision` + if mixed_precision: + context_manager = torch.autocast(device.type, dtype=torch.bfloat16) + else: + context_manager = nullcontext() # Dummy context manager + + # predict noise model_output + with context_manager: + noise_pred = self.transformer( + latent_model_input.to(self.transformer.dtype), + indices_grid=fractional_coords, + encoder_hidden_states=prompt_embeds_batch[indices].to( + self.transformer.dtype + ), + encoder_attention_mask=prompt_attention_mask_batch[indices], + timestep=current_timestep, + skip_layer_mask=skip_layer_mask, + skip_layer_strategy=skip_layer_strategy, + return_dict=False, + )[0] + + # perform guidance + if do_spatio_temporal_guidance: + noise_pred_text, noise_pred_text_perturb = noise_pred.chunk( + num_conds + )[-2:] + if do_classifier_free_guidance: + noise_pred_uncond, noise_pred_text = noise_pred.chunk(num_conds)[:2] + + if cfg_star_rescale: + # Rescales the unconditional noise prediction using the projection of the conditional prediction onto it: + # α = (⟨ε_text, ε_uncond⟩ / ||ε_uncond||²), then ε_uncond ← α * ε_uncond + # where ε_text is the conditional noise prediction and ε_uncond is the unconditional one. + positive_flat = noise_pred_text.view(batch_size, -1) + negative_flat = noise_pred_uncond.view(batch_size, -1) + dot_product = torch.sum( + positive_flat * negative_flat, dim=1, keepdim=True + ) + squared_norm = ( + torch.sum(negative_flat**2, dim=1, keepdim=True) + 1e-8 + ) + alpha = dot_product / squared_norm + noise_pred_uncond = alpha * noise_pred_uncond + + noise_pred = noise_pred_uncond + guidance_scale[i] * ( + noise_pred_text - noise_pred_uncond + ) + elif do_spatio_temporal_guidance: + noise_pred = noise_pred_text + if do_spatio_temporal_guidance: + noise_pred = noise_pred + stg_scale[i] * ( + noise_pred_text - noise_pred_text_perturb + ) + if do_rescaling and stg_scale[i] > 0.0: + noise_pred_text_std = noise_pred_text.view(batch_size, -1).std( + dim=1, keepdim=True + ) + noise_pred_std = noise_pred.view(batch_size, -1).std( + dim=1, keepdim=True + ) + + factor = noise_pred_text_std / noise_pred_std + factor = rescaling_scale[i] * factor + (1 - rescaling_scale[i]) + + noise_pred = noise_pred * factor.view(batch_size, 1, 1) + + current_timestep = current_timestep[:1] + # learned sigma + if ( + self.transformer.config.out_channels // 2 + == self.transformer.config.in_channels + ): + noise_pred = noise_pred.chunk(2, dim=1)[0] + + # compute previous image: x_t -> x_t-1 + latents = self.denoising_step( + latents, + noise_pred, + current_timestep, + orig_conditioning_mask, + t, + extra_step_kwargs, + stochastic_sampling=stochastic_sampling, + ) + + # call the callback, if provided + if i == len(timesteps) - 1 or ( + (i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0 + ): + progress_bar.update() + + if callback_on_step_end is not None: + callback_on_step_end(self, i, t, {}) + + if offload_to_cpu: + self.transformer = self.transformer.cpu() + if self._execution_device == "cuda": + torch.cuda.empty_cache() + + # Remove the added conditioning latents + latents = latents[:, num_cond_latents:] + + latents = self.patchifier.unpatchify( + latents=latents, + output_height=latent_height, + output_width=latent_width, + out_channels=self.transformer.in_channels + // math.prod(self.patchifier.patch_size), + ) + if output_type != "latent": + if self.vae.decoder.timestep_conditioning: + noise = 
torch.randn_like(latents) + if not isinstance(decode_timestep, list): + decode_timestep = [decode_timestep] * latents.shape[0] + if decode_noise_scale is None: + decode_noise_scale = decode_timestep + elif not isinstance(decode_noise_scale, list): + decode_noise_scale = [decode_noise_scale] * latents.shape[0] + + decode_timestep = torch.tensor(decode_timestep).to(latents.device) + decode_noise_scale = torch.tensor(decode_noise_scale).to( + latents.device + )[:, None, None, None, None] + latents = ( + latents * (1 - decode_noise_scale) + noise * decode_noise_scale + ) + else: + decode_timestep = None + latents = self.tone_map_latents(latents, tone_map_compression_ratio) + image = vae_decode( + latents, + self.vae, + is_video, + vae_per_channel_normalize=kwargs["vae_per_channel_normalize"], + timestep=decode_timestep, + ) + + image = self.image_processor.postprocess(image, output_type=output_type) + + else: + image = latents + + # Offload all models + self.maybe_free_model_hooks() + + if not return_dict: + return (image,) + + return ImagePipelineOutput(images=image) + + def denoising_step( + self, + latents: torch.Tensor, + noise_pred: torch.Tensor, + current_timestep: torch.Tensor, + conditioning_mask: torch.Tensor, + t: float, + extra_step_kwargs, + t_eps=1e-6, + stochastic_sampling=False, + ): + """ + Perform the denoising step for the required tokens, based on the current timestep and + conditioning mask: + Conditioning latents have an initial timestep and noising level of (1.0 - conditioning_mask) + and will start to be denoised when the current timestep is equal or lower than their + conditioning timestep. + (hard-conditioning latents with conditioning_mask = 1.0 are never denoised) + """ + # Denoise the latents using the scheduler + denoised_latents = self.scheduler.step( + noise_pred, + t if current_timestep is None else current_timestep, + latents, + **extra_step_kwargs, + return_dict=False, + stochastic_sampling=stochastic_sampling, + )[0] + + if conditioning_mask is None: + return denoised_latents + + tokens_to_denoise_mask = (t - t_eps < (1.0 - conditioning_mask)).unsqueeze(-1) + return torch.where(tokens_to_denoise_mask, denoised_latents, latents) + + + + #patch carlex deforms + # ltx_video/pipelines/pipeline_ltx_video.py (Versão com Indentação Corrigida) + + def prepare_conditioning( + self, + conditioning_items: Optional[List[Union[ConditioningItem, "LatentConditioningItem"]]], + init_latents: torch.Tensor, + num_frames: int, + height: int, + width: int, + vae_per_channel_normalize: bool = False, + generator=None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]: + """ + [MODIFICADO] Lida corretamente com ConditioningItem (pixels) e LatentConditioningItem com caminhos lógicos separados. + """ + assert isinstance(self.vae, CausalVideoAutoencoder) + + if not conditioning_items: + # Se não houver itens, apenas patchify e retorna. 
+            init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
+            init_pixel_coords = latent_to_pixel_coords(
+                init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning
+            )
+            return init_latents, init_pixel_coords, None, 0
+
+        init_conditioning_mask = torch.zeros(
+            init_latents[:, 0, :, :, :].shape, dtype=torch.float32, device=init_latents.device
+        )
+        extra_conditioning_latents = []
+        extra_conditioning_pixel_coords = []
+        extra_conditioning_mask = []
+        extra_conditioning_num_latents = 0
+
+        # --- [START OF FIX] ---
+        # Check the type of the first item to decide which processing path to take.
+        is_latent_mode = hasattr(conditioning_items[0], 'latent_tensor')
+
+        if is_latent_mode:
+            # --- PATH 1: processing dedicated to LatentConditioningItem ---
+            for item in conditioning_items:
+                media_item_latents = item.latent_tensor.to(dtype=init_latents.dtype, device=init_latents.device)
+                media_frame_number = item.media_frame_number
+                strength = item.conditioning_strength
+                n_latent_frames = media_item_latents.shape[2]
+
+                if media_frame_number == 0:
+                    # For latents, assume they fill the whole frame, with no spatial positioning.
+                    f_l, h_l, w_l = media_item_latents.shape[-3:]
+                    init_latents[:, :, :f_l, :h_l, :w_l] = torch.lerp(init_latents[:, :, :f_l, :h_l, :w_l], media_item_latents, strength)
+                    init_conditioning_mask[:, :f_l, :h_l, :w_l] = strength
+                else:
+                    # Simplified logic for non-initial latent frames
+                    noise = randn_tensor(media_item_latents.shape, generator=generator, device=media_item_latents.device, dtype=media_item_latents.dtype)
+                    media_item_latents = torch.lerp(noise, media_item_latents, strength)
+
+                    patched_latents, latent_coords = self.patchifier.patchify(latents=media_item_latents)
+                    pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
+                    pixel_coords[:, 0] += media_frame_number
+                    extra_conditioning_num_latents += patched_latents.shape[1]
+
+                    new_mask = torch.full(patched_latents.shape[:2], strength, dtype=torch.float32, device=init_latents.device)
+
+                    extra_conditioning_latents.append(patched_latents)
+                    extra_conditioning_pixel_coords.append(pixel_coords)
+                    extra_conditioning_mask.append(new_mask)
+
+        else:
+            # --- PATH 2: processing dedicated to ConditioningItem (pixels) ---
+            for item in conditioning_items:
+                if not isinstance(item, ConditioningItem): continue
+
+                item = self._resize_conditioning_item(item, height, width)
+                media_item_latents = vae_encode(
+                    item.media_item.to(dtype=self.vae.dtype, device=self.vae.device),
+                    self.vae, vae_per_channel_normalize=vae_per_channel_normalize
+                ).to(dtype=init_latents.dtype)
+
+                media_frame_number = item.media_frame_number
+                strength = item.conditioning_strength
+                n_pixel_frames = item.media_item.shape[2]
+
+                if media_frame_number == 0:
+                    media_item_latents, l_x, l_y = self._get_latent_spatial_position(media_item_latents, item, height, width, strip_latent_border=True)
+                    f_l, h_l, w_l = media_item_latents.shape[-3:]
+                    init_latents[:, :, :f_l, l_y:l_y+h_l, l_x:l_x+w_l] = torch.lerp(init_latents[:, :, :f_l, l_y:l_y+h_l, l_x:l_x+w_l], media_item_latents, strength)
+                    init_conditioning_mask[:, :f_l, l_y:l_y+h_l, l_x:l_x+w_l] = strength
+                else:
+                    if n_pixel_frames > 1:
+                        (init_latents, init_conditioning_mask, media_item_latents) = self._handle_non_first_conditioning_sequence(
+                            init_latents, init_conditioning_mask, media_item_latents, media_frame_number, strength
+                        )
+                    if media_item_latents is not None:
+                        noise = randn_tensor(media_item_latents.shape, generator=generator, device=media_item_latents.device, dtype=media_item_latents.dtype)
+                        media_item_latents = torch.lerp(noise, media_item_latents, strength)
+                        patched_latents, latent_coords = self.patchifier.patchify(latents=media_item_latents)
+                        pixel_coords = latent_to_pixel_coords(latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning)
+                        pixel_coords[:, 0] += media_frame_number
+                        extra_conditioning_num_latents += patched_latents.shape[1]
+                        new_mask = torch.full(patched_latents.shape[:2], strength, dtype=torch.float32, device=init_latents.device)
+                        extra_conditioning_latents.append(patched_latents)
+                        extra_conditioning_pixel_coords.append(pixel_coords)
+                        extra_conditioning_mask.append(new_mask)
+
+        # --- [END OF FIX] ---
+
+        # The rest of the function (final patchify and concatenation) stays the same
+        init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents)
+        init_pixel_coords = latent_to_pixel_coords(
+            init_latent_coords, self.vae, causal_fix=self.transformer.config.causal_temporal_positioning
+        )
+        init_conditioning_mask, _ = self.patchifier.patchify(latents=init_conditioning_mask.unsqueeze(1))
+        init_conditioning_mask = init_conditioning_mask.squeeze(-1)
+        if extra_conditioning_latents:
+            init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1)
+            init_pixel_coords = torch.cat([*extra_conditioning_pixel_coords, init_pixel_coords], dim=2)
+            init_conditioning_mask = torch.cat([*extra_conditioning_mask, init_conditioning_mask], dim=1)
+            if self.transformer.use_tpu_flash_attention:
+                init_latents = init_latents[:, :-extra_conditioning_num_latents]
+                init_pixel_coords = init_pixel_coords[:, :, :-extra_conditioning_num_latents]
+                init_conditioning_mask = init_conditioning_mask[:, :-extra_conditioning_num_latents]
+
+        return init_latents, init_pixel_coords, init_conditioning_mask, extra_conditioning_num_latents
+
+    # --- [START OF FIXED SECTION] ---
+    def prepare_conditioning12(
+        self,
+        conditioning_items: Optional[List[Union[ConditioningItem, "LatentConditioningItem"]]],
+        init_latents: torch.Tensor,
+        num_frames: int,
+        height: int,
+        width: int,
+        vae_per_channel_normalize: bool = False,
+        generator=None,
+    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
+        """
+        Prepare the conditioning tokens.
+        [MODIFIED] Correctly handles both ConditioningItem (pixels) and LatentConditioningItem.
+        """
+        assert isinstance(self.vae, CausalVideoAutoencoder)
+
+        if conditioning_items:
+            init_conditioning_mask = torch.zeros(
+                init_latents[:, 0, :, :, :].shape,
+                dtype=torch.float32,
+                device=init_latents.device,
+            )
+            extra_conditioning_latents = []
+            extra_conditioning_pixel_coords = []
+            extra_conditioning_mask = []
+            extra_conditioning_num_latents = 0
+
+            for conditioning_item in conditioning_items:
+                media_item_latents = None
+
+                # Use hasattr to avoid a circular import.
+                is_latent_item = hasattr(conditioning_item, 'latent_tensor')
+
+                if is_latent_item:
+                    # Latent item: skip the pixel pre-processing.
+                    media_item_latents = conditioning_item.latent_tensor.to(dtype=init_latents.dtype, device=init_latents.device)
+                    media_frame_number = conditioning_item.media_frame_number
+                    strength = conditioning_item.conditioning_strength
+                    n_frames = media_item_latents.shape[2]
+
+                elif isinstance(conditioning_item, ConditioningItem):
+                    # Pixel item: follow the original path.
+ conditioning_item = self._resize_conditioning_item(conditioning_item, height, width) + media_item = conditioning_item.media_item + media_frame_number = conditioning_item.media_frame_number + strength = conditioning_item.conditioning_strength + b, c, n_frames, h, w = media_item.shape + + media_item_latents = vae_encode( + media_item.to(dtype=self.vae.dtype, device=self.vae.device), + self.vae, + vae_per_channel_normalize=vae_per_channel_normalize, + ).to(dtype=init_latents.dtype) + + else: # CORREÇÃO: Adiciona um indented block aqui + continue # Pula itens que não são de nenhum tipo conhecido + + if media_item_latents is None: + continue + + # A lógica unificada a partir daqui + if media_frame_number == 0: + pos_item = None if is_latent_item else conditioning_item + + media_item_latents, l_x, l_y = self._get_latent_spatial_position( + media_item_latents, + pos_item, + height, + width, + strip_latent_border=True, + ) + b, c_l, f_l, h_l, w_l = media_item_latents.shape + + init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l] = ( + torch.lerp( + init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l], + media_item_latents, + strength, + ) + ) + init_conditioning_mask[ + :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l + ] = strength + else: + if n_frames > 1: + ( + init_latents, + init_conditioning_mask, + media_item_latents, + ) = self._handle_non_first_conditioning_sequence( + init_latents, + init_conditioning_mask, + media_item_latents, + media_frame_number, + strength, + ) + + if media_item_latents is not None: + noise = randn_tensor( + media_item_latents.shape, + generator=generator, + device=media_item_latents.device, + dtype=media_item_latents.dtype, + ) + media_item_latents = torch.lerp( + noise, media_item_latents, strength + ) + media_item_latents, latent_coords = self.patchifier.patchify( + latents=media_item_latents + ) + pixel_coords = latent_to_pixel_coords( + latent_coords, + self.vae, + causal_fix=self.transformer.config.causal_temporal_positioning, + ) + pixel_coords[:, 0] += media_frame_number + extra_conditioning_num_latents += media_item_latents.shape[1] + conditioning_mask = torch.full( + media_item_latents.shape[:2], + strength, + dtype=torch.float32, + device=init_latents.device, + ) + extra_conditioning_latents.append(media_item_latents) + extra_conditioning_pixel_coords.append(pixel_coords) + extra_conditioning_mask.append(conditioning_mask) + + init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents) + init_pixel_coords = latent_to_pixel_coords( + init_latent_coords, + self.vae, + causal_fix=self.transformer.config.causal_temporal_positioning, + ) + + if not conditioning_items: + return init_latents, init_pixel_coords, None, 0 + + init_conditioning_mask, _ = self.patchifier.patchify(latents=init_conditioning_mask.unsqueeze(1)) + init_conditioning_mask = init_conditioning_mask.squeeze(-1) + + if extra_conditioning_latents: + init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1) + init_pixel_coords = torch.cat([*extra_conditioning_pixel_coords, init_pixel_coords], dim=2) + init_conditioning_mask = torch.cat([*extra_conditioning_mask, init_conditioning_mask], dim=1) + if self.transformer.use_tpu_flash_attention: + init_latents = init_latents[:, :-extra_conditioning_num_latents] + init_pixel_coords = init_pixel_coords[:, :, :-extra_conditioning_num_latents] + init_conditioning_mask = init_conditioning_mask[:, :-extra_conditioning_num_latents] + + return init_latents, init_pixel_coords, init_conditioning_mask, 
extra_conditioning_num_latents + + # Se não houver conditioning_items, retorna os valores iniciais + init_latents, init_latent_coords = self.patchifier.patchify(latents=init_latents) + init_pixel_coords = latent_to_pixel_coords( + init_latent_coords, + self.vae, + causal_fix=self.transformer.config.causal_temporal_positioning, + ) + return init_latents, init_pixel_coords, None, 0 + + def _get_latent_spatial_position( + self, + latents: torch.Tensor, + conditioning_item: Optional[ConditioningItem], # Tornamos opcional + height: int, + width: int, + strip_latent_border, + ): + """ + [MODIFICADO] Se conditioning_item for None (caso de item latente), assume posição central. + """ + scale = self.vae_scale_factor + + # --- [INÍCIO DA CORREÇÃO] --- + # A verificação de None deve vir PRIMEIRO. + if conditioning_item is None: + # Caso de um item latente. Não há posicionamento espacial, + # então assumimos que ele preenche todo o quadro. + x_start, y_start = 0, 0 + w, h = width, height + else: + # Caso de um item de pixel, com possível posicionamento. + h, w = conditioning_item.media_item.shape[-2:] + assert (h <= height and w <= width), f"Conditioning item size {h}x{w} is larger than target size {height}x{width}" + assert h % scale == 0 and w % scale == 0 + x_start, y_start = conditioning_item.media_x, conditioning_item.media_y + x_start = (width - w) // 2 if x_start is None else x_start + y_start = (height - h) // 2 if y_start is None else y_start + # --- [FIM DA CORREÇÃO] --- + + x_end, y_end = x_start + w, y_start + h + assert (x_end <= width and y_end <= height), f"Conditioning item {x_start}:{x_end}x{y_start}:{y_end} is out of bounds for target size {width}x{height}" + + if strip_latent_border: + if x_start > 0: + x_start += scale + latents = latents[:, :, :, :, 1:] + if y_start > 0: + y_start += scale + latents = latents[:, :, :, 1:, :] + if x_end < width: + latents = latents[:, :, :, :, :-1] + if y_end < height: + latents = latents[:, :, :, :-1, :] + + return latents, x_start // scale, y_start // scale + + def _get_latent_spatial_position1( + self, + latents: torch.Tensor, + conditioning_item: Optional[ConditioningItem], + height: int, + width: int, + strip_latent_border, + ): + """ + [MODIFICADO] Se conditioning_item for None (caso de item latente), assume posição central. + """ + scale = self.vae_scale_factor + + if conditioning_item is None: + x_start, y_start = 0, 0 + w, h = width, height + else: + h, w = conditioning_item.media_item.shape[-2:] + assert (h <= height and w <= width), f"Conditioning item size {h}x{w} is larger than target size {height}x{width}" + assert h % scale == 0 and w % scale == 0 + x_start, y_start = conditioning_item.media_x, conditioning_item.media_y + x_start = (width - w) // 2 if x_start is None else x_start + y_start = (height - h) // 2 if y_start is None else y_start + + x_end, y_end = x_start + w, y_start + h + assert (x_end <= width and y_end <= height), f"Conditioning item {x_start}:{x_end}x{y_start}:{y_end} is out of bounds for target size {width}x{height}" + + if strip_latent_border: + if x_start > 0: + x_start += scale + latents = latents[:, :, :, :, 1:] + if y_start > 0: + y_start += scale + latents = latents[:, :, :, 1:, :] + if x_end < width: + latents = latents[:, :, :, :, :-1] + if y_end < height: + latents = latents[:, :, :, :-1, :] + + return latents, x_start // scale, y_start // scale + + # --- [FIM DA SEÇÃO CORRIGIDA] --- + + # ... 
(O resto da classe LTXVideoPipeline continua como antes) + + def prepare_conditioning1( + self, + conditioning_items: Optional[List[ConditioningItem]], + init_latents: torch.Tensor, + num_frames: int, + height: int, + width: int, + vae_per_channel_normalize: bool = False, + generator=None, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]: + """ + Prepare conditioning tokens based on the provided conditioning items. + + This method encodes provided conditioning items (video frames or single frames) into latents + and integrates them with the initial latent tensor. It also calculates corresponding pixel + coordinates, a mask indicating the influence of conditioning latents, and the total number of + conditioning latents. + + Args: + conditioning_items (Optional[List[ConditioningItem]]): A list of ConditioningItem objects. + init_latents (torch.Tensor): The initial latent tensor of shape (b, c, f_l, h_l, w_l), where + `f_l` is the number of latent frames, and `h_l` and `w_l` are latent spatial dimensions. + num_frames, height, width: The dimensions of the generated video. + vae_per_channel_normalize (bool, optional): Whether to normalize channels during VAE encoding. + Defaults to `False`. + generator: The random generator + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]: + - `init_latents` (torch.Tensor): The updated latent tensor including conditioning latents, + patchified into (b, n, c) shape. + - `init_pixel_coords` (torch.Tensor): The pixel coordinates corresponding to the updated + latent tensor. + - `conditioning_mask` (torch.Tensor): A mask indicating the conditioning-strength of each + latent token. + - `num_cond_latents` (int): The total number of latent tokens added from conditioning items. + + Raises: + AssertionError: If input shapes, dimensions, or conditions for applying conditioning are invalid. 
+ """ + assert isinstance(self.vae, CausalVideoAutoencoder) + + if conditioning_items: + batch_size, _, num_latent_frames = init_latents.shape[:3] + + init_conditioning_mask = torch.zeros( + init_latents[:, 0, :, :, :].shape, + dtype=torch.float32, + device=init_latents.device, + ) + + extra_conditioning_latents = [] + extra_conditioning_pixel_coords = [] + extra_conditioning_mask = [] + extra_conditioning_num_latents = 0 # Number of extra conditioning latents added (should be removed before decoding) + + # Process each conditioning item + for conditioning_item in conditioning_items: + conditioning_item = self._resize_conditioning_item( + conditioning_item, height, width + ) + media_item = conditioning_item.media_item + media_frame_number = conditioning_item.media_frame_number + strength = conditioning_item.conditioning_strength + assert media_item.ndim == 5 # (b, c, f, h, w) + b, c, n_frames, h, w = media_item.shape + assert ( + height == h and width == w + ) or media_frame_number == 0, f"Dimensions do not match: {height}x{width} != {h}x{w} - allowed only when media_frame_number == 0" + assert n_frames % 8 == 1 + assert ( + media_frame_number >= 0 + and media_frame_number + n_frames <= num_frames + ) + + # Encode the provided conditioning media item + media_item_latents = vae_encode( + media_item.to(dtype=self.vae.dtype, device=self.vae.device), + self.vae, + vae_per_channel_normalize=vae_per_channel_normalize, + ).to(dtype=init_latents.dtype) + + # Handle the different conditioning cases + if media_frame_number == 0: + # Get the target spatial position of the latent conditioning item + media_item_latents, l_x, l_y = self._get_latent_spatial_position( + media_item_latents, + conditioning_item, + height, + width, + strip_latent_border=True, + ) + b, c_l, f_l, h_l, w_l = media_item_latents.shape + + # First frame or sequence - just update the initial noise latents and the mask + init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l] = ( + torch.lerp( + init_latents[:, :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l], + media_item_latents, + strength, + ) + ) + init_conditioning_mask[ + :, :f_l, l_y : l_y + h_l, l_x : l_x + w_l + ] = strength + else: + # Non-first frame or sequence + if n_frames > 1: + # Handle non-first sequence. + # Encoded latents are either fully consumed, or the prefix is handled separately below. 
+ ( + init_latents, + init_conditioning_mask, + media_item_latents, + ) = self._handle_non_first_conditioning_sequence( + init_latents, + init_conditioning_mask, + media_item_latents, + media_frame_number, + strength, + ) + + # Single frame or sequence-prefix latents + if media_item_latents is not None: + noise = randn_tensor( + media_item_latents.shape, + generator=generator, + device=media_item_latents.device, + dtype=media_item_latents.dtype, + ) + + media_item_latents = torch.lerp( + noise, media_item_latents, strength + ) + + # Patchify the extra conditioning latents and calculate their pixel coordinates + media_item_latents, latent_coords = self.patchifier.patchify( + latents=media_item_latents + ) + pixel_coords = latent_to_pixel_coords( + latent_coords, + self.vae, + causal_fix=self.transformer.config.causal_temporal_positioning, + ) + + # Update the frame numbers to match the target frame number + pixel_coords[:, 0] += media_frame_number + extra_conditioning_num_latents += media_item_latents.shape[1] + + conditioning_mask = torch.full( + media_item_latents.shape[:2], + strength, + dtype=torch.float32, + device=init_latents.device, + ) + + extra_conditioning_latents.append(media_item_latents) + extra_conditioning_pixel_coords.append(pixel_coords) + extra_conditioning_mask.append(conditioning_mask) + + # Patchify the updated latents and calculate their pixel coordinates + init_latents, init_latent_coords = self.patchifier.patchify( + latents=init_latents + ) + init_pixel_coords = latent_to_pixel_coords( + init_latent_coords, + self.vae, + causal_fix=self.transformer.config.causal_temporal_positioning, + ) + + if not conditioning_items: + return init_latents, init_pixel_coords, None, 0 + + init_conditioning_mask, _ = self.patchifier.patchify( + latents=init_conditioning_mask.unsqueeze(1) + ) + init_conditioning_mask = init_conditioning_mask.squeeze(-1) + + if extra_conditioning_latents: + # Stack the extra conditioning latents, pixel coordinates and mask + init_latents = torch.cat([*extra_conditioning_latents, init_latents], dim=1) + init_pixel_coords = torch.cat( + [*extra_conditioning_pixel_coords, init_pixel_coords], dim=2 + ) + init_conditioning_mask = torch.cat( + [*extra_conditioning_mask, init_conditioning_mask], dim=1 + ) + + if self.transformer.use_tpu_flash_attention: + # When flash attention is used, keep the original number of tokens by removing + # tokens from the end. + init_latents = init_latents[:, :-extra_conditioning_num_latents] + init_pixel_coords = init_pixel_coords[ + :, :, :-extra_conditioning_num_latents + ] + init_conditioning_mask = init_conditioning_mask[ + :, :-extra_conditioning_num_latents + ] + + return ( + init_latents, + init_pixel_coords, + init_conditioning_mask, + extra_conditioning_num_latents, + ) + + @staticmethod + def _resize_conditioning_item( + conditioning_item: ConditioningItem, + height: int, + width: int, + ): + if conditioning_item.media_x or conditioning_item.media_y: + raise ValueError( + "Provide media_item in the target size for spatial conditioning." + ) + new_conditioning_item = copy.copy(conditioning_item) + new_conditioning_item.media_item = LTXVideoPipeline.resize_tensor( + conditioning_item.media_item, height, width + ) + return new_conditioning_item + + def _get_latent_spatial_position( + self, + latents: torch.Tensor, + conditioning_item: ConditioningItem, + height: int, + width: int, + strip_latent_border, + ): + """ + Get the spatial position of the conditioning item in the latent space. 
+ If requested, strip the conditioning latent borders that do not align with target borders. + (border latents look different then other latents and might confuse the model) + """ + scale = self.vae_scale_factor + h, w = conditioning_item.media_item.shape[-2:] + assert ( + h <= height and w <= width + ), f"Conditioning item size {h}x{w} is larger than target size {height}x{width}" + assert h % scale == 0 and w % scale == 0 + + # Compute the start and end spatial positions of the media item + x_start, y_start = conditioning_item.media_x, conditioning_item.media_y + x_start = (width - w) // 2 if x_start is None else x_start + y_start = (height - h) // 2 if y_start is None else y_start + x_end, y_end = x_start + w, y_start + h + assert ( + x_end <= width and y_end <= height + ), f"Conditioning item {x_start}:{x_end}x{y_start}:{y_end} is out of bounds for target size {width}x{height}" + + if strip_latent_border: + # Strip one latent from left/right and/or top/bottom, update x, y accordingly + if x_start > 0: + x_start += scale + latents = latents[:, :, :, :, 1:] + + if y_start > 0: + y_start += scale + latents = latents[:, :, :, 1:, :] + + if x_end < width: + latents = latents[:, :, :, :, :-1] + + if y_end < height: + latents = latents[:, :, :, :-1, :] + + return latents, x_start // scale, y_start // scale + + @staticmethod + def _handle_non_first_conditioning_sequence( + init_latents: torch.Tensor, + init_conditioning_mask: torch.Tensor, + latents: torch.Tensor, + media_frame_number: int, + strength: float, + num_prefix_latent_frames: int = 2, + prefix_latents_mode: str = "concat", + prefix_soft_conditioning_strength: float = 0.15, + ): + """ + Special handling for a conditioning sequence that does not start on the first frame. + The special handling is required to allow a short encoded video to be used as middle + (or last) sequence in a longer video. + Args: + init_latents (torch.Tensor): The initial noise latents to be updated. + init_conditioning_mask (torch.Tensor): The initial conditioning mask to be updated. + latents (torch.Tensor): The encoded conditioning item. + media_frame_number (int): The target frame number of the first frame in the conditioning sequence. + strength (float): The conditioning strength for the conditioning latents. + num_prefix_latent_frames (int, optional): The length of the sequence prefix, to be handled + separately. Defaults to 2. + prefix_latents_mode (str, optional): Special treatment for prefix (boundary) latents. + - "drop": Drop the prefix latents. + - "soft": Use the prefix latents, but with soft-conditioning + - "concat": Add the prefix latents as extra tokens (like single frames) + prefix_soft_conditioning_strength (float, optional): The strength of the soft-conditioning for + the prefix latents, relevant if `prefix_latents_mode` is "soft". Defaults to 0.1. 
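+        Worked example (reviewer addition, not part of the original patch): with the default
+        num_prefix_latent_frames = 2 and prefix_latents_mode = "concat", a clip encoded into
+        f_l = 5 latent frames and placed at media_frame_number = 32 is blended into latent
+        frames 32 // 8 + 2 = 6 up to (but excluding) 9, while its 2-frame prefix is returned
+        to the caller to be appended as extra conditioning tokens.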
+ + """ + f_l = latents.shape[2] + f_l_p = num_prefix_latent_frames + assert f_l >= f_l_p + assert media_frame_number % 8 == 0 + if f_l > f_l_p: + # Insert the conditioning latents **excluding the prefix** into the sequence + f_l_start = media_frame_number // 8 + f_l_p + f_l_end = f_l_start + f_l - f_l_p + init_latents[:, :, f_l_start:f_l_end] = torch.lerp( + init_latents[:, :, f_l_start:f_l_end], + latents[:, :, f_l_p:], + strength, + ) + # Mark these latent frames as conditioning latents + init_conditioning_mask[:, f_l_start:f_l_end] = strength + + # Handle the prefix-latents + if prefix_latents_mode == "soft": + if f_l_p > 1: + # Drop the first (single-frame) latent and soft-condition the remaining prefix + f_l_start = media_frame_number // 8 + 1 + f_l_end = f_l_start + f_l_p - 1 + strength = min(prefix_soft_conditioning_strength, strength) + init_latents[:, :, f_l_start:f_l_end] = torch.lerp( + init_latents[:, :, f_l_start:f_l_end], + latents[:, :, 1:f_l_p], + strength, + ) + # Mark these latent frames as conditioning latents + init_conditioning_mask[:, f_l_start:f_l_end] = strength + latents = None # No more latents to handle + elif prefix_latents_mode == "drop": + # Drop the prefix latents + latents = None + elif prefix_latents_mode == "concat": + # Pass-on the prefix latents to be handled as extra conditioning frames + latents = latents[:, :, :f_l_p] + else: + raise ValueError(f"Invalid prefix_latents_mode: {prefix_latents_mode}") + return ( + init_latents, + init_conditioning_mask, + latents, + ) + + def trim_conditioning_sequence( + self, start_frame: int, sequence_num_frames: int, target_num_frames: int + ): + """ + Trim a conditioning sequence to the allowed number of frames. + + Args: + start_frame (int): The target frame number of the first frame in the sequence. + sequence_num_frames (int): The number of frames in the sequence. + target_num_frames (int): The target number of frames in the generated video. + + Returns: + int: updated sequence length + """ + scale_factor = self.video_scale_factor + num_frames = min(sequence_num_frames, target_num_frames - start_frame) + # Trim down to a multiple of temporal_scale_factor frames plus 1 + num_frames = (num_frames - 1) // scale_factor * scale_factor + 1 + return num_frames + + @staticmethod + def tone_map_latents( + latents: torch.Tensor, + compression: float, + ) -> torch.Tensor: + """ + Applies a non-linear tone-mapping function to latent values to reduce their dynamic range + in a perceptually smooth way using a sigmoid-based compression. + + This is useful for regularizing high-variance latents or for conditioning outputs + during generation, especially when controlling dynamic behavior with a `compression` factor. + + Parameters: + ---------- + latents : torch.Tensor + Input latent tensor with arbitrary shape. Expected to be roughly in [-1, 1] or [0, 1] range. + compression : float + Compression strength in the range [0, 1]. + - 0.0: No tone-mapping (identity transform) + - 1.0: Full compression effect + + Returns: + ------- + torch.Tensor + The tone-mapped latent tensor of the same shape as input. 
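+        Note (reviewer addition, not part of the original patch): per element, the code below
+        computes s = 0.75 * compression and returns
+        latents * (1 - 0.8 * s * sigmoid(4 * s * (|latents| - 1))), so values near zero are
+        left almost untouched while large magnitudes are compressed progressively harder.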
+ """ + if not (0 <= compression <= 1): + raise ValueError("Compression must be in the range [0, 1]") + + # Remap [0-1] to [0-0.75] and apply sigmoid compression in one shot + scale_factor = compression * 0.75 + abs_latents = torch.abs(latents) + + # Sigmoid compression: sigmoid shifts large values toward 0.2, small values stay ~1.0 + # When scale_factor=0, sigmoid term vanishes, when scale_factor=0.75, full effect + sigmoid_term = torch.sigmoid(4.0 * scale_factor * (abs_latents - 1.0)) + scales = 1.0 - 0.8 * scale_factor * sigmoid_term + + filtered = latents * scales + return filtered + + +def adain_filter_latent( + latents: torch.Tensor, reference_latents: torch.Tensor, factor=1.0 +): + """ + Applies Adaptive Instance Normalization (AdaIN) to a latent tensor based on + statistics from a reference latent tensor. + + Args: + latent (torch.Tensor): Input latents to normalize + reference_latent (torch.Tensor): The reference latents providing style statistics. + factor (float): Blending factor between original and transformed latent. + Range: -10.0 to 10.0, Default: 1.0 + + Returns: + torch.Tensor: The transformed latent tensor + """ + result = latents.clone() + + for i in range(latents.size(0)): + for c in range(latents.size(1)): + r_sd, r_mean = torch.std_mean( + reference_latents[i, c], dim=None + ) # index by original dim order + i_sd, i_mean = torch.std_mean(result[i, c], dim=None) + + result[i, c] = ((result[i, c] - i_mean) / i_sd) * r_sd + r_mean + + result = torch.lerp(latents, result, factor) + return result + + +class LTXMultiScalePipeline: + def _upsample_latents( + self, latest_upsampler: LatentUpsampler, latents: torch.Tensor + ): + assert latents.device == latest_upsampler.device + + latents = un_normalize_latents( + latents, self.vae, vae_per_channel_normalize=True + ) + upsampled_latents = latest_upsampler(latents) + upsampled_latents = normalize_latents( + upsampled_latents, self.vae, vae_per_channel_normalize=True + ) + return upsampled_latents + + def __init__( + self, video_pipeline: LTXVideoPipeline, latent_upsampler: LatentUpsampler + ): + self.video_pipeline = video_pipeline + self.vae = video_pipeline.vae + self.latent_upsampler = latent_upsampler + + def __call__( + self, + downscale_factor: float, + first_pass: dict, + second_pass: dict, + *args: Any, + **kwargs: Any, + ) -> Any: + original_kwargs = kwargs.copy() + original_output_type = kwargs["output_type"] + original_width = kwargs["width"] + original_height = kwargs["height"] + + x_width = int(kwargs["width"] * downscale_factor) + downscaled_width = x_width - (x_width % self.video_pipeline.vae_scale_factor) + x_height = int(kwargs["height"] * downscale_factor) + downscaled_height = x_height - (x_height % self.video_pipeline.vae_scale_factor) + + kwargs["output_type"] = "latent" + kwargs["width"] = downscaled_width + kwargs["height"] = downscaled_height + kwargs.update(**first_pass) + result = self.video_pipeline(*args, **kwargs) + latents = result.images + + upsampled_latents = self._upsample_latents(self.latent_upsampler, latents) + upsampled_latents = adain_filter_latent( + latents=upsampled_latents, reference_latents=latents + ) + + kwargs = original_kwargs + + kwargs["latents"] = upsampled_latents + kwargs["output_type"] = original_output_type + kwargs["width"] = downscaled_width * 2 + kwargs["height"] = downscaled_height * 2 + kwargs.update(**second_pass) + + result = self.video_pipeline(*args, **kwargs) + if original_output_type != "latent": + num_frames = result.images.shape[2] + videos = 
rearrange(result.images, "b c f h w -> (b f) c h w") + + videos = F.interpolate( + videos, + size=(original_height, original_width), + mode="bilinear", + align_corners=False, + ) + videos = rearrange(videos, "(b f) c h w -> b c f h w", f=num_frames) + result.images = videos + + return result diff --git a/ltx_video/schedulers/__init__.py b/ltx_video/schedulers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/schedulers/rf.py b/ltx_video/schedulers/rf.py new file mode 100644 index 0000000000000000000000000000000000000000..c7d2ab3426645941efa71ec0c5d866d9ea9c90d4 --- /dev/null +++ b/ltx_video/schedulers/rf.py @@ -0,0 +1,386 @@ +import math +from abc import ABC, abstractmethod +from dataclasses import dataclass +from typing import Callable, Optional, Tuple, Union +import json +import os +from pathlib import Path + +import torch +from diffusers.configuration_utils import ConfigMixin, register_to_config +from diffusers.schedulers.scheduling_utils import SchedulerMixin +from diffusers.utils import BaseOutput +from torch import Tensor +from safetensors import safe_open + + +from ltx_video.utils.torch_utils import append_dims + +from ltx_video.utils.diffusers_config_mapping import ( + diffusers_and_ours_config_mapping, + make_hashable_key, +) + + +def linear_quadratic_schedule(num_steps, threshold_noise=0.025, linear_steps=None): + if num_steps == 1: + return torch.tensor([1.0]) + if linear_steps is None: + linear_steps = num_steps // 2 + linear_sigma_schedule = [ + i * threshold_noise / linear_steps for i in range(linear_steps) + ] + threshold_noise_step_diff = linear_steps - threshold_noise * num_steps + quadratic_steps = num_steps - linear_steps + quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps**2) + linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / ( + quadratic_steps**2 + ) + const = quadratic_coef * (linear_steps**2) + quadratic_sigma_schedule = [ + quadratic_coef * (i**2) + linear_coef * i + const + for i in range(linear_steps, num_steps) + ] + sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule + [1.0] + sigma_schedule = [1.0 - x for x in sigma_schedule] + return torch.tensor(sigma_schedule[:-1]) + + +def simple_diffusion_resolution_dependent_timestep_shift( + samples_shape: torch.Size, + timesteps: Tensor, + n: int = 32 * 32, +) -> Tensor: + if len(samples_shape) == 3: + _, m, _ = samples_shape + elif len(samples_shape) in [4, 5]: + m = math.prod(samples_shape[2:]) + else: + raise ValueError( + "Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)" + ) + snr = (timesteps / (1 - timesteps)) ** 2 + shift_snr = torch.log(snr) + 2 * math.log(m / n) + shifted_timesteps = torch.sigmoid(0.5 * shift_snr) + + return shifted_timesteps + + +def time_shift(mu: float, sigma: float, t: Tensor): + return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma) + + +def get_normal_shift( + n_tokens: int, + min_tokens: int = 1024, + max_tokens: int = 4096, + min_shift: float = 0.95, + max_shift: float = 2.05, +) -> Callable[[float], float]: + m = (max_shift - min_shift) / (max_tokens - min_tokens) + b = min_shift - m * min_tokens + return m * n_tokens + b + + +def strech_shifts_to_terminal(shifts: Tensor, terminal=0.1): + """ + Stretch a function (given as sampled shifts) so that its final value matches the given terminal value + using the provided formula. 
+ + Parameters: + - shifts (Tensor): The samples of the function to be stretched (PyTorch Tensor). + - terminal (float): The desired terminal value (value at the last sample). + + Returns: + - Tensor: The stretched shifts such that the final value equals `terminal`. + """ + if shifts.numel() == 0: + raise ValueError("The 'shifts' tensor must not be empty.") + + # Ensure terminal value is valid + if terminal <= 0 or terminal >= 1: + raise ValueError("The terminal value must be between 0 and 1 (exclusive).") + + # Transform the shifts using the given formula + one_minus_z = 1 - shifts + scale_factor = one_minus_z[-1] / (1 - terminal) + stretched_shifts = 1 - (one_minus_z / scale_factor) + + return stretched_shifts + + +def sd3_resolution_dependent_timestep_shift( + samples_shape: torch.Size, + timesteps: Tensor, + target_shift_terminal: Optional[float] = None, +) -> Tensor: + """ + Shifts the timestep schedule as a function of the generated resolution. + + In the SD3 paper, the authors empirically how to shift the timesteps based on the resolution of the target images. + For more details: https://arxiv.org/pdf/2403.03206 + + In Flux they later propose a more dynamic resolution dependent timestep shift, see: + https://github.com/black-forest-labs/flux/blob/87f6fff727a377ea1c378af692afb41ae84cbe04/src/flux/sampling.py#L66 + + + Args: + samples_shape (torch.Size): The samples batch shape (batch_size, channels, height, width) or + (batch_size, channels, frame, height, width). + timesteps (Tensor): A batch of timesteps with shape (batch_size,). + target_shift_terminal (float): The target terminal value for the shifted timesteps. + + Returns: + Tensor: The shifted timesteps. + """ + if len(samples_shape) == 3: + _, m, _ = samples_shape + elif len(samples_shape) in [4, 5]: + m = math.prod(samples_shape[2:]) + else: + raise ValueError( + "Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)" + ) + + shift = get_normal_shift(m) + time_shifts = time_shift(shift, 1, timesteps) + if target_shift_terminal is not None: # Stretch the shifts to the target terminal + time_shifts = strech_shifts_to_terminal(time_shifts, target_shift_terminal) + return time_shifts + + +class TimestepShifter(ABC): + @abstractmethod + def shift_timesteps(self, samples_shape: torch.Size, timesteps: Tensor) -> Tensor: + pass + + +@dataclass +class RectifiedFlowSchedulerOutput(BaseOutput): + """ + Output class for the scheduler's step function output. + + Args: + prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the + denoising loop. + pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): + The predicted denoised sample (x_{0}) based on the model output from the current timestep. + `pred_original_sample` can be used to preview progress or for guidance. 
+ """ + + prev_sample: torch.FloatTensor + pred_original_sample: Optional[torch.FloatTensor] = None + + +class RectifiedFlowScheduler(SchedulerMixin, ConfigMixin, TimestepShifter): + order = 1 + + @register_to_config + def __init__( + self, + num_train_timesteps=1000, + shifting: Optional[str] = None, + base_resolution: int = 32**2, + target_shift_terminal: Optional[float] = None, + sampler: Optional[str] = "Uniform", + shift: Optional[float] = None, + ): + super().__init__() + self.init_noise_sigma = 1.0 + self.num_inference_steps = None + self.sampler = sampler + self.shifting = shifting + self.base_resolution = base_resolution + self.target_shift_terminal = target_shift_terminal + self.timesteps = self.sigmas = self.get_initial_timesteps( + num_train_timesteps, shift=shift + ) + self.shift = shift + + def get_initial_timesteps( + self, num_timesteps: int, shift: Optional[float] = None + ) -> Tensor: + if self.sampler == "Uniform": + return torch.linspace(1, 1 / num_timesteps, num_timesteps) + elif self.sampler == "LinearQuadratic": + return linear_quadratic_schedule(num_timesteps) + elif self.sampler == "Constant": + assert ( + shift is not None + ), "Shift must be provided for constant time shift sampler." + return time_shift( + shift, 1, torch.linspace(1, 1 / num_timesteps, num_timesteps) + ) + + def shift_timesteps(self, samples_shape: torch.Size, timesteps: Tensor) -> Tensor: + if self.shifting == "SD3": + return sd3_resolution_dependent_timestep_shift( + samples_shape, timesteps, self.target_shift_terminal + ) + elif self.shifting == "SimpleDiffusion": + return simple_diffusion_resolution_dependent_timestep_shift( + samples_shape, timesteps, self.base_resolution + ) + return timesteps + + def set_timesteps( + self, + num_inference_steps: Optional[int] = None, + samples_shape: Optional[torch.Size] = None, + timesteps: Optional[Tensor] = None, + device: Union[str, torch.device] = None, + ): + """ + Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference. + If `timesteps` are provided, they will be used instead of the scheduled timesteps. + + Args: + num_inference_steps (`int` *optional*): The number of diffusion steps used when generating samples. + samples_shape (`torch.Size` *optional*): The samples batch shape, used for shifting. + timesteps ('torch.Tensor' *optional*): Specific timesteps to use instead of scheduled timesteps. + device (`Union[str, torch.device]`, *optional*): The device to which the timesteps tensor will be moved. + """ + if timesteps is not None and num_inference_steps is not None: + raise ValueError( + "You cannot provide both `timesteps` and `num_inference_steps`." 
+ ) + if timesteps is None: + num_inference_steps = min( + self.config.num_train_timesteps, num_inference_steps + ) + timesteps = self.get_initial_timesteps( + num_inference_steps, shift=self.shift + ).to(device) + timesteps = self.shift_timesteps(samples_shape, timesteps) + else: + timesteps = torch.Tensor(timesteps).to(device) + num_inference_steps = len(timesteps) + self.timesteps = timesteps + self.num_inference_steps = num_inference_steps + self.sigmas = self.timesteps + + @staticmethod + def from_pretrained(pretrained_model_path: Union[str, os.PathLike]): + pretrained_model_path = Path(pretrained_model_path) + if pretrained_model_path.is_file(): + comfy_single_file_state_dict = {} + with safe_open(pretrained_model_path, framework="pt", device="cpu") as f: + metadata = f.metadata() + for k in f.keys(): + comfy_single_file_state_dict[k] = f.get_tensor(k) + configs = json.loads(metadata["config"]) + config = configs["scheduler"] + del comfy_single_file_state_dict + + elif pretrained_model_path.is_dir(): + diffusers_noise_scheduler_config_path = ( + pretrained_model_path / "scheduler" / "scheduler_config.json" + ) + + with open(diffusers_noise_scheduler_config_path, "r") as f: + scheduler_config = json.load(f) + hashable_config = make_hashable_key(scheduler_config) + if hashable_config in diffusers_and_ours_config_mapping: + config = diffusers_and_ours_config_mapping[hashable_config] + return RectifiedFlowScheduler.from_config(config) + + def scale_model_input( + self, sample: torch.FloatTensor, timestep: Optional[int] = None + ) -> torch.FloatTensor: + # pylint: disable=unused-argument + """ + Ensures interchangeability with schedulers that need to scale the denoising model input depending on the + current timestep. + + Args: + sample (`torch.FloatTensor`): input sample + timestep (`int`, optional): current timestep + + Returns: + `torch.FloatTensor`: scaled input sample + """ + return sample + + def step( + self, + model_output: torch.FloatTensor, + timestep: torch.FloatTensor, + sample: torch.FloatTensor, + return_dict: bool = True, + stochastic_sampling: Optional[bool] = False, + **kwargs, + ) -> Union[RectifiedFlowSchedulerOutput, Tuple]: + """ + Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion + process from the learned model outputs (most often the predicted noise). + z_{t_1} = z_t - Delta_t * v + The method finds the next timestep that is lower than the input timestep(s) and denoises the latents + to that level. The input timestep(s) are not required to be one of the predefined timesteps. + + Args: + model_output (`torch.FloatTensor`): + The direct output from learned diffusion model - the velocity, + timestep (`float`): + The current discrete timestep in the diffusion chain (global or per-token). + sample (`torch.FloatTensor`): + A current latent tokens to be de-noised. + return_dict (`bool`, *optional*, defaults to `True`): + Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`. + stochastic_sampling (`bool`, *optional*, defaults to `False`): + Whether to use stochastic sampling for the sampling process. + + Returns: + [`~schedulers.scheduling_utils.RectifiedFlowSchedulerOutput`] or `tuple`: + If return_dict is `True`, [`~schedulers.rf_scheduler.RectifiedFlowSchedulerOutput`] is returned, + otherwise a tuple is returned where the first element is the sample tensor. 
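+        Worked example (reviewer addition, not part of the original patch): with
+        self.timesteps = [1.0, 0.75, 0.5, 0.25] and a global timestep of 0.6, the closest lower
+        timestep is 0.5, so dt = 0.1 and, without stochastic sampling,
+        prev_sample = sample - 0.1 * model_output.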
+ """ + if self.num_inference_steps is None: + raise ValueError( + "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" + ) + t_eps = 1e-6 # Small epsilon to avoid numerical issues in timestep values + + timesteps_padded = torch.cat( + [self.timesteps, torch.zeros(1, device=self.timesteps.device)] + ) + + # Find the next lower timestep(s) and compute the dt from the current timestep(s) + if timestep.ndim == 0: + # Global timestep case + lower_mask = timesteps_padded < timestep - t_eps + lower_timestep = timesteps_padded[lower_mask][0] # Closest lower timestep + dt = timestep - lower_timestep + + else: + # Per-token case + assert timestep.ndim == 2 + lower_mask = timesteps_padded[:, None, None] < timestep[None] - t_eps + lower_timestep = lower_mask * timesteps_padded[:, None, None] + lower_timestep, _ = lower_timestep.max(dim=0) + dt = (timestep - lower_timestep)[..., None] + + # Compute previous sample + if stochastic_sampling: + x0 = sample - timestep[..., None] * model_output + next_timestep = timestep[..., None] - dt + prev_sample = self.add_noise(x0, torch.randn_like(sample), next_timestep) + else: + prev_sample = sample - dt * model_output + + if not return_dict: + return (prev_sample,) + + return RectifiedFlowSchedulerOutput(prev_sample=prev_sample) + + def add_noise( + self, + original_samples: torch.FloatTensor, + noise: torch.FloatTensor, + timesteps: torch.FloatTensor, + ) -> torch.FloatTensor: + sigmas = timesteps + sigmas = append_dims(sigmas, original_samples.ndim) + alphas = 1 - sigmas + noisy_samples = alphas * original_samples + sigmas * noise + return noisy_samples diff --git a/ltx_video/utils/__init__.py b/ltx_video/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/ltx_video/utils/diffusers_config_mapping.py b/ltx_video/utils/diffusers_config_mapping.py new file mode 100644 index 0000000000000000000000000000000000000000..53c0082d182617f6f84eab9c849f7ef0224becb8 --- /dev/null +++ b/ltx_video/utils/diffusers_config_mapping.py @@ -0,0 +1,174 @@ +def make_hashable_key(dict_key): + def convert_value(value): + if isinstance(value, list): + return tuple(value) + elif isinstance(value, dict): + return tuple(sorted((k, convert_value(v)) for k, v in value.items())) + else: + return value + + return tuple(sorted((k, convert_value(v)) for k, v in dict_key.items())) + + +DIFFUSERS_SCHEDULER_CONFIG = { + "_class_name": "FlowMatchEulerDiscreteScheduler", + "_diffusers_version": "0.32.0.dev0", + "base_image_seq_len": 1024, + "base_shift": 0.95, + "invert_sigmas": False, + "max_image_seq_len": 4096, + "max_shift": 2.05, + "num_train_timesteps": 1000, + "shift": 1.0, + "shift_terminal": 0.1, + "use_beta_sigmas": False, + "use_dynamic_shifting": True, + "use_exponential_sigmas": False, + "use_karras_sigmas": False, +} +DIFFUSERS_TRANSFORMER_CONFIG = { + "_class_name": "LTXVideoTransformer3DModel", + "_diffusers_version": "0.32.0.dev0", + "activation_fn": "gelu-approximate", + "attention_bias": True, + "attention_head_dim": 64, + "attention_out_bias": True, + "caption_channels": 4096, + "cross_attention_dim": 2048, + "in_channels": 128, + "norm_elementwise_affine": False, + "norm_eps": 1e-06, + "num_attention_heads": 32, + "num_layers": 28, + "out_channels": 128, + "patch_size": 1, + "patch_size_t": 1, + "qk_norm": "rms_norm_across_heads", +} +DIFFUSERS_VAE_CONFIG = { + "_class_name": "AutoencoderKLLTXVideo", + "_diffusers_version": "0.32.0.dev0", + 
"block_out_channels": [128, 256, 512, 512], + "decoder_causal": False, + "encoder_causal": True, + "in_channels": 3, + "latent_channels": 128, + "layers_per_block": [4, 3, 3, 3, 4], + "out_channels": 3, + "patch_size": 4, + "patch_size_t": 1, + "resnet_norm_eps": 1e-06, + "scaling_factor": 1.0, + "spatio_temporal_scaling": [True, True, True, False], +} + +OURS_SCHEDULER_CONFIG = { + "_class_name": "RectifiedFlowScheduler", + "_diffusers_version": "0.25.1", + "num_train_timesteps": 1000, + "shifting": "SD3", + "base_resolution": None, + "target_shift_terminal": 0.1, +} + +OURS_TRANSFORMER_CONFIG = { + "_class_name": "Transformer3DModel", + "_diffusers_version": "0.25.1", + "_name_or_path": "PixArt-alpha/PixArt-XL-2-256x256", + "activation_fn": "gelu-approximate", + "attention_bias": True, + "attention_head_dim": 64, + "attention_type": "default", + "caption_channels": 4096, + "cross_attention_dim": 2048, + "double_self_attention": False, + "dropout": 0.0, + "in_channels": 128, + "norm_elementwise_affine": False, + "norm_eps": 1e-06, + "norm_num_groups": 32, + "num_attention_heads": 32, + "num_embeds_ada_norm": 1000, + "num_layers": 28, + "num_vector_embeds": None, + "only_cross_attention": False, + "out_channels": 128, + "project_to_2d_pos": True, + "upcast_attention": False, + "use_linear_projection": False, + "qk_norm": "rms_norm", + "standardization_norm": "rms_norm", + "positional_embedding_type": "rope", + "positional_embedding_theta": 10000.0, + "positional_embedding_max_pos": [20, 2048, 2048], + "timestep_scale_multiplier": 1000, +} +OURS_VAE_CONFIG = { + "_class_name": "CausalVideoAutoencoder", + "dims": 3, + "in_channels": 3, + "out_channels": 3, + "latent_channels": 128, + "blocks": [ + ["res_x", 4], + ["compress_all", 1], + ["res_x_y", 1], + ["res_x", 3], + ["compress_all", 1], + ["res_x_y", 1], + ["res_x", 3], + ["compress_all", 1], + ["res_x", 3], + ["res_x", 4], + ], + "scaling_factor": 1.0, + "norm_layer": "pixel_norm", + "patch_size": 4, + "latent_log_var": "uniform", + "use_quant_conv": False, + "causal_decoder": False, +} + + +diffusers_and_ours_config_mapping = { + make_hashable_key(DIFFUSERS_SCHEDULER_CONFIG): OURS_SCHEDULER_CONFIG, + make_hashable_key(DIFFUSERS_TRANSFORMER_CONFIG): OURS_TRANSFORMER_CONFIG, + make_hashable_key(DIFFUSERS_VAE_CONFIG): OURS_VAE_CONFIG, +} + + +TRANSFORMER_KEYS_RENAME_DICT = { + "proj_in": "patchify_proj", + "time_embed": "adaln_single", + "norm_q": "q_norm", + "norm_k": "k_norm", +} + + +VAE_KEYS_RENAME_DICT = { + "decoder.up_blocks.3.conv_in": "decoder.up_blocks.7", + "decoder.up_blocks.3.upsamplers.0": "decoder.up_blocks.8", + "decoder.up_blocks.3": "decoder.up_blocks.9", + "decoder.up_blocks.2.upsamplers.0": "decoder.up_blocks.5", + "decoder.up_blocks.2.conv_in": "decoder.up_blocks.4", + "decoder.up_blocks.2": "decoder.up_blocks.6", + "decoder.up_blocks.1.upsamplers.0": "decoder.up_blocks.2", + "decoder.up_blocks.1": "decoder.up_blocks.3", + "decoder.up_blocks.0": "decoder.up_blocks.1", + "decoder.mid_block": "decoder.up_blocks.0", + "encoder.down_blocks.3": "encoder.down_blocks.8", + "encoder.down_blocks.2.downsamplers.0": "encoder.down_blocks.7", + "encoder.down_blocks.2": "encoder.down_blocks.6", + "encoder.down_blocks.1.downsamplers.0": "encoder.down_blocks.4", + "encoder.down_blocks.1.conv_out": "encoder.down_blocks.5", + "encoder.down_blocks.1": "encoder.down_blocks.3", + "encoder.down_blocks.0.conv_out": "encoder.down_blocks.2", + "encoder.down_blocks.0.downsamplers.0": "encoder.down_blocks.1", + "encoder.down_blocks.0": 
"encoder.down_blocks.0", + "encoder.mid_block": "encoder.down_blocks.9", + "conv_shortcut.conv": "conv_shortcut", + "resnets": "res_blocks", + "norm3": "norm3.norm", + "latents_mean": "per_channel_statistics.mean-of-means", + "latents_std": "per_channel_statistics.std-of-means", +} diff --git a/ltx_video/utils/prompt_enhance_utils.py b/ltx_video/utils/prompt_enhance_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9010517282925f8f3d2343829347f309e5c0e41a --- /dev/null +++ b/ltx_video/utils/prompt_enhance_utils.py @@ -0,0 +1,226 @@ +import logging +from typing import Union, List, Optional + +import torch +from PIL import Image + +logger = logging.getLogger(__name__) # pylint: disable=invalid-name + +T2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award winning movies, When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes. +Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph. +Start directly with the action, and keep descriptions literal and precise. +Think like a cinematographer describing a shot list. +Do not change the user input intent, just enhance it. +Keep within 150 words. +For best results, build your prompts using this structure: +Start with main action in a single sentence +Add specific details about movements and gestures +Describe character/object appearances precisely +Include background and environment details +Specify camera angles and movements +Describe lighting and colors +Note any changes or sudden events +Do not exceed the 150 word limit! +Output the enhanced prompt only. +""" + +I2V_CINEMATIC_PROMPT = """You are an expert cinematic director with many award winning movies, When writing prompts based on the user input, focus on detailed, chronological descriptions of actions and scenes. +Include specific movements, appearances, camera angles, and environmental details - all in a single flowing paragraph. +Start directly with the action, and keep descriptions literal and precise. +Think like a cinematographer describing a shot list. +Keep within 150 words. +For best results, build your prompts using this structure: +Describe the image first and then add the user input. Image description should be in first priority! Align to the image caption if it contradicts the user text input. +Start with main action in a single sentence +Add specific details about movements and gestures +Describe character/object appearances precisely +Include background and environment details +Specify camera angles and movements +Describe lighting and colors +Note any changes or sudden events +Align to the image caption if it contradicts the user text input. +Do not exceed the 150 word limit! +Output the enhanced prompt only. 
+""" + + +def tensor_to_pil(tensor): + # Ensure tensor is in range [-1, 1] + assert tensor.min() >= -1 and tensor.max() <= 1 + + # Convert from [-1, 1] to [0, 1] + tensor = (tensor + 1) / 2 + + # Rearrange from [C, H, W] to [H, W, C] + tensor = tensor.permute(1, 2, 0) + + # Convert to numpy array and then to uint8 range [0, 255] + numpy_image = (tensor.cpu().numpy() * 255).astype("uint8") + + # Convert to PIL Image + return Image.fromarray(numpy_image) + + +def generate_cinematic_prompt( + image_caption_model, + image_caption_processor, + prompt_enhancer_model, + prompt_enhancer_tokenizer, + prompt: Union[str, List[str]], + conditioning_items: Optional[List] = None, + max_new_tokens: int = 256, +) -> List[str]: + prompts = [prompt] if isinstance(prompt, str) else prompt + + if conditioning_items is None: + prompts = _generate_t2v_prompt( + prompt_enhancer_model, + prompt_enhancer_tokenizer, + prompts, + max_new_tokens, + T2V_CINEMATIC_PROMPT, + ) + else: + if len(conditioning_items) > 1 or conditioning_items[0].media_frame_number != 0: + logger.warning( + "prompt enhancement does only support unconditional or first frame of conditioning items, returning original prompts" + ) + return prompts + + first_frame_conditioning_item = conditioning_items[0] + first_frames = _get_first_frames_from_conditioning_item( + first_frame_conditioning_item + ) + + assert len(first_frames) == len( + prompts + ), "Number of conditioning frames must match number of prompts" + + prompts = _generate_i2v_prompt( + image_caption_model, + image_caption_processor, + prompt_enhancer_model, + prompt_enhancer_tokenizer, + prompts, + first_frames, + max_new_tokens, + I2V_CINEMATIC_PROMPT, + ) + + return prompts + + +def _get_first_frames_from_conditioning_item(conditioning_item) -> List[Image.Image]: + frames_tensor = conditioning_item.media_item + return [ + tensor_to_pil(frames_tensor[i, :, 0, :, :]) + for i in range(frames_tensor.shape[0]) + ] + + +def _generate_t2v_prompt( + prompt_enhancer_model, + prompt_enhancer_tokenizer, + prompts: List[str], + max_new_tokens: int, + system_prompt: str, +) -> List[str]: + messages = [ + [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": f"user_prompt: {p}"}, + ] + for p in prompts + ] + + texts = [ + prompt_enhancer_tokenizer.apply_chat_template( + m, tokenize=False, add_generation_prompt=True + ) + for m in messages + ] + model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to( + prompt_enhancer_model.device + ) + + return _generate_and_decode_prompts( + prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens + ) + + +def _generate_i2v_prompt( + image_caption_model, + image_caption_processor, + prompt_enhancer_model, + prompt_enhancer_tokenizer, + prompts: List[str], + first_frames: List[Image.Image], + max_new_tokens: int, + system_prompt: str, +) -> List[str]: + image_captions = _generate_image_captions( + image_caption_model, image_caption_processor, first_frames + ) + + messages = [ + [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": f"user_prompt: {p}\nimage_caption: {c}"}, + ] + for p, c in zip(prompts, image_captions) + ] + + texts = [ + prompt_enhancer_tokenizer.apply_chat_template( + m, tokenize=False, add_generation_prompt=True + ) + for m in messages + ] + model_inputs = prompt_enhancer_tokenizer(texts, return_tensors="pt").to( + prompt_enhancer_model.device + ) + + return _generate_and_decode_prompts( + prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, 
max_new_tokens + ) + + +def _generate_image_captions( + image_caption_model, + image_caption_processor, + images: List[Image.Image], + system_prompt: str = "", +) -> List[str]: + image_caption_prompts = [system_prompt] * len(images) + inputs = image_caption_processor( + image_caption_prompts, images, return_tensors="pt" + ).to(image_caption_model.device) + + with torch.inference_mode(): + generated_ids = image_caption_model.generate( + input_ids=inputs["input_ids"], + pixel_values=inputs["pixel_values"], + max_new_tokens=1024, + do_sample=False, + num_beams=3, + ) + + return image_caption_processor.batch_decode(generated_ids, skip_special_tokens=True) + + +def _generate_and_decode_prompts( + prompt_enhancer_model, prompt_enhancer_tokenizer, model_inputs, max_new_tokens: int +) -> List[str]: + with torch.inference_mode(): + outputs = prompt_enhancer_model.generate( + **model_inputs, max_new_tokens=max_new_tokens + ) + generated_ids = [ + output_ids[len(input_ids) :] + for input_ids, output_ids in zip(model_inputs.input_ids, outputs) + ] + decoded_prompts = prompt_enhancer_tokenizer.batch_decode( + generated_ids, skip_special_tokens=True + ) + + return decoded_prompts diff --git a/ltx_video/utils/skip_layer_strategy.py b/ltx_video/utils/skip_layer_strategy.py new file mode 100644 index 0000000000000000000000000000000000000000..30f9016e1cf2abbe62360775e914fa63876e4cf7 --- /dev/null +++ b/ltx_video/utils/skip_layer_strategy.py @@ -0,0 +1,8 @@ +from enum import Enum, auto + + +class SkipLayerStrategy(Enum): + AttentionSkip = auto() + AttentionValues = auto() + Residual = auto() + TransformerBlock = auto() diff --git a/ltx_video/utils/torch_utils.py b/ltx_video/utils/torch_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..991b07c36269ef4dafb88a85834f2596647ba816 --- /dev/null +++ b/ltx_video/utils/torch_utils.py @@ -0,0 +1,25 @@ +import torch +from torch import nn + + +def append_dims(x: torch.Tensor, target_dims: int) -> torch.Tensor: + """Appends dimensions to the end of a tensor until it has target_dims dimensions.""" + dims_to_append = target_dims - x.ndim + if dims_to_append < 0: + raise ValueError( + f"input has {x.ndim} dims but target_dims is {target_dims}, which is less" + ) + elif dims_to_append == 0: + return x + return x[(...,) + (None,) * dims_to_append] + + +class Identity(nn.Module): + """A placeholder identity operator that is argument-insensitive.""" + + def __init__(self, *args, **kwargs) -> None: # pylint: disable=unused-argument + super().__init__() + + # pylint: disable=unused-argument + def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor: + return x diff --git a/mmaudio/LICENSE.txt b/mmaudio/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/mmaudio/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
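Before the bundled third-party helpers below, a brief worked illustration of the rectified-flow arithmetic that `RectifiedFlowScheduler.step` and `add_noise` implement further up in this diff. The snippet is a standalone sketch in plain PyTorch: it does not import the scheduler, the tensor shapes are invented for the example, and only the `append_dims` contract is copied from `ltx_video/utils/torch_utils.py`.

```python
import torch

def append_dims(x: torch.Tensor, target_dims: int) -> torch.Tensor:
    # Same contract as ltx_video/utils/torch_utils.py: right-pad with singleton dims.
    return x[(...,) + (None,) * (target_dims - x.ndim)]

# Illustrative shapes only: one sample with 8 latent tokens of dimension 4.
sample = torch.randn(1, 8, 4)      # z_t
velocity = torch.randn(1, 8, 4)    # model output v
t, t_prev = torch.tensor(0.6), torch.tensor(0.4)

# Deterministic branch of `step`: a single Euler update toward the next lower timestep,
# z_{t_prev} = z_t - (t - t_prev) * v
prev_sample = sample - (t - t_prev) * velocity

# Forward noising used by `add_noise` (sigma equals the timestep in this scheduler):
# x_t = (1 - sigma) * x0 + sigma * eps
x0 = torch.randn(1, 8, 4)
eps = torch.randn_like(x0)
sigma = append_dims(torch.tensor([0.4]), x0.ndim)   # broadcast over token/channel dims
noisy = (1 - sigma) * x0 + sigma * eps
print(prev_sample.shape, noisy.shape)
```

The stochastic branch of `step` follows the same rule from the other direction: it reconstructs `x0 = z_t - t * v`, then re-noises it to the next lower timestep with `add_noise`.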
diff --git a/mmaudio/README.md b/mmaudio/README.md new file mode 100644 index 0000000000000000000000000000000000000000..964c76ea7d615f5287e9283e23df316dad8bfdd4 --- /dev/null +++ b/mmaudio/README.md @@ -0,0 +1,135 @@ +# 🛠️ helpers/ - Ferramentas de IA de Terceiros Adaptadas para ADUC-SDR + +Esta pasta contém implementações adaptadas de modelos e utilitários de IA de terceiros, que servem como "especialistas" ou "ferramentas" de baixo nível para a arquitetura ADUC-SDR. + +**IMPORTANTE:** O conteúdo desta pasta é de autoria de seus respectivos idealizadores e desenvolvedores originais. Esta pasta **NÃO FAZ PARTE** do projeto principal ADUC-SDR em termos de sua arquitetura inovadora. Ela serve como um repositório para as **dependências diretas e modificadas** que os `DeformesXDEngines` (os estágios do "foguete" ADUC-SDR) invocam para realizar tarefas específicas (geração de imagem, vídeo, áudio). + +As modificações realizadas nos arquivos aqui presentes visam principalmente: +1. **Adaptação de Interfaces:** Padronizar as interfaces para que se encaixem no fluxo de orquestração do ADUC-SDR. +2. **Gerenciamento de Recursos:** Integrar lógicas de carregamento/descarregamento de modelos (GPU management) e configurações via arquivos YAML. +3. **Otimização de Fluxo:** Ajustar as pipelines para aceitar formatos de entrada mais eficientes (ex: tensores pré-codificados em vez de caminhos de mídia, pulando etapas de codificação/decodificação redundantes). + +--- + +## 📄 Licenciamento + +O conteúdo original dos projetos listados abaixo é licenciado sob a **Licença Apache 2.0**, ou outra licença especificada pelos autores originais. Todas as modificações e o uso desses arquivos dentro da estrutura `helpers/` do projeto ADUC-SDR estão em conformidade com os termos da **Licença Apache 2.0**. + +As licenças originais dos projetos podem ser encontradas nas suas respectivas fontes ou nos subdiretórios `incl_licenses/` dentro de cada módulo adaptado. + +--- + +## 🛠️ API dos Helpers e Guia de Uso + +Esta seção detalha como cada helper (agente especialista) deve ser utilizado dentro do ecossistema ADUC-SDR. Todos os agentes são instanciados como **singletons** no `hardware_manager.py` para garantir o gerenciamento centralizado de recursos de GPU. + +### **gemini_helpers.py (GeminiAgent)** + +* **Propósito:** Atua como o "Oráculo de Síntese Adaptativo", responsável por todas as tarefas de processamento de linguagem natural, como criação de storyboards, geração de prompts, e tomada de decisões narrativas. +* **Singleton Instance:** `gemini_agent_singleton` +* **Construtor:** `GeminiAgent()` + * Lê `configs/gemini_config.yaml` para obter o nome do modelo, parâmetros de inferência e caminhos de templates de prompt. A chave da API é lida da variável de ambiente `GEMINI_API_KEY`. +* **Métodos Públicos:** + * `generate_storyboard(prompt: str, num_keyframes: int, ref_image_paths: list[str])` + * **Inputs:** + * `prompt`: A ideia geral do filme (string). + * `num_keyframes`: O número de cenas a serem geradas (int). + * `ref_image_paths`: Lista de caminhos para as imagens de referência (list[str]). + * **Output:** `tuple[list[str], str]` (Uma tupla contendo a lista de strings do storyboard e um relatório textual da operação). + * `select_keyframes_from_pool(storyboard: list, base_image_paths: list[str], pool_image_paths: list[str])` + * **Inputs:** + * `storyboard`: A lista de strings do storyboard gerado. + * `base_image_paths`: Imagens de referência base (list[str]). 
+ * `pool_image_paths`: O "banco de imagens" de onde selecionar (list[str]). + * **Output:** `tuple[list[str], str]` (Uma tupla contendo a lista de caminhos de imagens selecionadas e um relatório textual). + * `get_anticipatory_keyframe_prompt(...)` + * **Inputs:** Contexto narrativo e visual para gerar um prompt de imagem. + * **Output:** `tuple[str, str]` (Uma tupla contendo o prompt gerado para o modelo de imagem e um relatório textual). + * `get_initial_motion_prompt(...)` + * **Inputs:** Contexto narrativo e visual para a primeira transição de vídeo. + * **Output:** `tuple[str, str]` (Uma tupla contendo o prompt de movimento gerado e um relatório textual). + * `get_transition_decision(...)` + * **Inputs:** Contexto narrativo e visual para uma transição de vídeo intermediária. + * **Output:** `tuple[dict, str]` (Uma tupla contendo um dicionário `{"transition_type": "...", "motion_prompt": "..."}` e um relatório textual). + * `generate_audio_prompts(...)` + * **Inputs:** Contexto narrativo global. + * **Output:** `tuple[dict, str]` (Uma tupla contendo um dicionário `{"music_prompt": "...", "sfx_prompt": "..."}` e um relatório textual). + +### **flux_kontext_helpers.py (FluxPoolManager)** + +* **Propósito:** Especialista em geração de imagens de alta qualidade (keyframes) usando a pipeline FluxKontext. Gerencia um pool de workers para otimizar o uso de múltiplas GPUs. +* **Singleton Instance:** `flux_kontext_singleton` +* **Construtor:** `FluxPoolManager(device_ids: list[str], flux_config_file: str)` + * Lê `configs/flux_config.yaml`. +* **Método Público:** + * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int, seed: int = 42, callback: callable = None)` + * **Inputs:** + * `prompt`: Prompt textual para guiar a geração (string). + * `reference_images`: Lista de objetos `PIL.Image` como referência visual. + * `width`, `height`: Dimensões da imagem de saída (int). + * `seed`: Semente para reprodutibilidade (int). + * `callback`: Função de callback opcional para monitorar o progresso. + * **Output:** `PIL.Image.Image` (O objeto da imagem gerada). + +### **dreamo_helpers.py (DreamOAgent)** + +* **Propósito:** Especialista em geração de imagens de alta qualidade (keyframes) usando a pipeline DreamO, com capacidades avançadas de edição e estilo a partir de referências. +* **Singleton Instance:** `dreamo_agent_singleton` +* **Construtor:** `DreamOAgent(device_id: str = None)` + * Lê `configs/dreamo_config.yaml`. +* **Método Público:** + * `generate_image(prompt: str, reference_images: list[Image.Image], width: int, height: int)` + * **Inputs:** + * `prompt`: Prompt textual para guiar a geração (string). + * `reference_images`: Lista de objetos `PIL.Image` como referência visual. A lógica interna atribui a primeira imagem como `style` e as demais como `ip`. + * `width`, `height`: Dimensões da imagem de saída (int). + * **Output:** `PIL.Image.Image` (O objeto da imagem gerada). + +### **ltx_manager_helpers.py (LtxPoolManager)** + +* **Propósito:** Especialista na geração de fragmentos de vídeo no espaço latente usando a pipeline LTX-Video. Gerencia um pool de workers para otimizar o uso de múltiplas GPUs. +* **Singleton Instance:** `ltx_manager_singleton` +* **Construtor:** `LtxPoolManager(device_ids: list[str], ltx_model_config_file: str, ltx_global_config_file: str)` + * Lê o `ltx_global_config_file` e o `ltx_model_config_file` para configurar a pipeline. 
+* **Método Público:** + * `generate_latent_fragment(**kwargs)` + * **Inputs:** Dicionário de keyword arguments (`kwargs`) contendo todos os parâmetros da pipeline LTX, incluindo: + * `height`, `width`: Dimensões do vídeo (int). + * `video_total_frames`: Número total de frames a serem gerados (int). + * `video_fps`: Frames por segundo (int). + * `motion_prompt`: Prompt de movimento (string). + * `conditioning_items_data`: Lista de objetos `LatentConditioningItem` contendo os tensores latentes de condição. + * `guidance_scale`, `stg_scale`, `num_inference_steps`, etc. + * **Output:** `tuple[torch.Tensor, tuple]` (Uma tupla contendo o tensor latente gerado e os valores de padding utilizados). + +### **mmaudio_helper.py (MMAudioAgent)** + +* **Propósito:** Especialista em geração de áudio para um determinado fragmento de vídeo. +* **Singleton Instance:** `mmaudio_agent_singleton` +* **Construtor:** `MMAudioAgent(workspace_dir: str, device_id: str = None, mmaudio_config_file: str)` + * Lê `configs/mmaudio_config.yaml`. +* **Método Público:** + * `generate_audio_for_video(video_path: str, prompt: str, negative_prompt: str, duration_seconds: float)` + * **Inputs:** + * `video_path`: Caminho para o arquivo de vídeo silencioso (string). + * `prompt`: Prompt textual para guiar a geração de áudio (string). + * `negative_prompt`: Prompt negativo para áudio (string). + * `duration_seconds`: Duração exata do vídeo (float). + * **Output:** `str` (O caminho para o novo arquivo de vídeo com a faixa de áudio integrada). + +--- + +## 🔗 Projetos Originais e Atribuições +(A seção de atribuições e licenças permanece a mesma que definimos anteriormente) + +### DreamO +* **Repositório Original:** [https://github.com/bytedance/DreamO](https://github.com/bytedance/DreamO) +... + +### LTX-Video +* **Repositório Original:** [https://github.com/Lightricks/LTX-Video](https://github.com/Lightricks/LTX-Video) +... + +### MMAudio +* **Repositório Original:** [https://github.com/hkchengrex/MMAudio](https://github.com/hkchengrex/MMAudio) +... 
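As a quick orientation for the API tables above, the sketch below chains three of the documented singletons (storyboard, then keyframes, then audio). The method names and signatures follow this README; the import paths, file paths and parameter values are assumptions for illustration only, and the LTX latent-fragment step is omitted because it requires pre-encoded `LatentConditioningItem` tensors.

```python
# Minimal orchestration sketch based on the signatures documented above.
# Import paths are hypothetical; only the method names/arguments come from this README.
from PIL import Image

from gemini_helpers import gemini_agent_singleton          # hypothetical import path
from flux_kontext_helpers import flux_kontext_singleton    # hypothetical import path
from mmaudio_helper import mmaudio_agent_singleton         # hypothetical import path

# 1) Storyboard from a film idea plus reference images.
storyboard, report = gemini_agent_singleton.generate_storyboard(
    prompt="A lighthouse keeper during a storm",   # illustrative prompt
    num_keyframes=3,
    ref_image_paths=["refs/keeper.png"],            # illustrative path
)

# 2) One keyframe per storyboard scene via the Flux pool.
refs = [Image.open("refs/keeper.png")]
keyframes = [
    flux_kontext_singleton.generate_image(
        prompt=scene, reference_images=refs, width=1024, height=576, seed=42
    )
    for scene in storyboard
]

# 3) Add an audio track to an already-rendered silent fragment.
final_path = mmaudio_agent_singleton.generate_audio_for_video(
    video_path="out/fragment_000.mp4",              # illustrative path
    prompt="rain, wind and distant thunder",
    negative_prompt="music",
    duration_seconds=4.0,
)
print(final_path)
```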
\ No newline at end of file diff --git a/mmaudio/__init__.py b/mmaudio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/data/__init__.py b/mmaudio/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/data/av_utils.py b/mmaudio/data/av_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7d4945b9658b8208f039e72d78e1dac45ae5e12d --- /dev/null +++ b/mmaudio/data/av_utils.py @@ -0,0 +1,136 @@ +from dataclasses import dataclass +from fractions import Fraction +from pathlib import Path +from typing import Optional + +import av +import numpy as np +import torch +from av import AudioFrame + + +@dataclass +class VideoInfo: + duration_sec: float + fps: Fraction + clip_frames: torch.Tensor + sync_frames: torch.Tensor + all_frames: Optional[list[np.ndarray]] + + @property + def height(self): + return self.all_frames[0].shape[0] + + @property + def width(self): + return self.all_frames[0].shape[1] + + +def read_frames(video_path: Path, list_of_fps: list[float], start_sec: float, end_sec: float, + need_all_frames: bool) -> tuple[list[np.ndarray], list[np.ndarray], Fraction]: + output_frames = [[] for _ in list_of_fps] + next_frame_time_for_each_fps = [0.0 for _ in list_of_fps] + time_delta_for_each_fps = [1 / fps for fps in list_of_fps] + all_frames = [] + + # container = av.open(video_path) + with av.open(video_path) as container: + stream = container.streams.video[0] + fps = stream.guessed_rate + stream.thread_type = 'AUTO' + for packet in container.demux(stream): + for frame in packet.decode(): + frame_time = frame.time + if frame_time < start_sec: + continue + if frame_time > end_sec: + break + + frame_np = None + if need_all_frames: + frame_np = frame.to_ndarray(format='rgb24') + all_frames.append(frame_np) + + for i, _ in enumerate(list_of_fps): + this_time = frame_time + while this_time >= next_frame_time_for_each_fps[i]: + if frame_np is None: + frame_np = frame.to_ndarray(format='rgb24') + + output_frames[i].append(frame_np) + next_frame_time_for_each_fps[i] += time_delta_for_each_fps[i] + + output_frames = [np.stack(frames) for frames in output_frames] + return output_frames, all_frames, fps + + +def reencode_with_audio(video_info: VideoInfo, output_path: Path, audio: torch.Tensor, + sampling_rate: int): + container = av.open(output_path, 'w') + output_video_stream = container.add_stream('h264', video_info.fps) + output_video_stream.codec_context.bit_rate = 10 * 1e6 # 10 Mbps + output_video_stream.width = video_info.width + output_video_stream.height = video_info.height + output_video_stream.pix_fmt = 'yuv420p' + + output_audio_stream = container.add_stream('aac', sampling_rate) + + # encode video + for image in video_info.all_frames: + image = av.VideoFrame.from_ndarray(image) + packet = output_video_stream.encode(image) + container.mux(packet) + + for packet in output_video_stream.encode(): + container.mux(packet) + + # convert float tensor audio to numpy array + audio_np = audio.numpy().astype(np.float32) + audio_frame = AudioFrame.from_ndarray(audio_np, format='flt', layout='mono') + audio_frame.sample_rate = sampling_rate + + for packet in output_audio_stream.encode(audio_frame): + container.mux(packet) + + for packet in output_audio_stream.encode(): + container.mux(packet) + + container.close() + + +def remux_with_audio(video_path: Path, audio: torch.Tensor, output_path: Path, 
sampling_rate: int): + """ + NOTE: I don't think we can get the exact video duration right without re-encoding + so we are not using this but keeping it here for reference + """ + video = av.open(video_path) + output = av.open(output_path, 'w') + input_video_stream = video.streams.video[0] + output_video_stream = output.add_stream(template=input_video_stream) + output_audio_stream = output.add_stream('aac', sampling_rate) + + duration_sec = audio.shape[-1] / sampling_rate + + for packet in video.demux(input_video_stream): + # We need to skip the "flushing" packets that `demux` generates. + if packet.dts is None: + continue + # We need to assign the packet to the new stream. + packet.stream = output_video_stream + output.mux(packet) + + # convert float tensor audio to numpy array + audio_np = audio.numpy().astype(np.float32) + audio_frame = av.AudioFrame.from_ndarray(audio_np, format='flt', layout='mono') + audio_frame.sample_rate = sampling_rate + + for packet in output_audio_stream.encode(audio_frame): + output.mux(packet) + + for packet in output_audio_stream.encode(): + output.mux(packet) + + video.close() + output.close() + + output.close() diff --git a/mmaudio/eval_utils.py b/mmaudio/eval_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a5c9291f2687855b10b63b3f6e67e299c86cbbbe --- /dev/null +++ b/mmaudio/eval_utils.py @@ -0,0 +1,217 @@ +import dataclasses +import logging +from pathlib import Path +from typing import Optional + +import torch +from colorlog import ColoredFormatter +from torchvision.transforms import v2 + +from mmaudio.data.av_utils import VideoInfo, read_frames, reencode_with_audio +from mmaudio.model.flow_matching import FlowMatching +from mmaudio.model.networks import MMAudio +from mmaudio.model.sequence_config import (CONFIG_16K, CONFIG_44K, SequenceConfig) +from mmaudio.model.utils.features_utils import FeaturesUtils +from mmaudio.utils.download_utils import download_model_if_needed + +log = logging.getLogger() + + +@dataclasses.dataclass +class ModelConfig: + model_name: str + model_path: Path + vae_path: Path + bigvgan_16k_path: Optional[Path] + mode: str + synchformer_ckpt: Path = Path('./ext_weights/synchformer_state_dict.pth') + + @property + def seq_cfg(self) -> SequenceConfig: + if self.mode == '16k': + return CONFIG_16K + elif self.mode == '44k': + return CONFIG_44K + + def download_if_needed(self): + download_model_if_needed(self.model_path) + download_model_if_needed(self.vae_path) + if self.bigvgan_16k_path is not None: + download_model_if_needed(self.bigvgan_16k_path) + download_model_if_needed(self.synchformer_ckpt) + + +small_16k = ModelConfig(model_name='small_16k', + model_path=Path('./weights/mmaudio_small_16k.pth'), + vae_path=Path('./ext_weights/v1-16.pth'), + bigvgan_16k_path=Path('./ext_weights/best_netG.pt'), + mode='16k') +small_44k = ModelConfig(model_name='small_44k', + model_path=Path('./weights/mmaudio_small_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +medium_44k = ModelConfig(model_name='medium_44k', + model_path=Path('./weights/mmaudio_medium_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +large_44k = ModelConfig(model_name='large_44k', + model_path=Path('./weights/mmaudio_large_44k.pth'), + vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +large_44k_v2 = ModelConfig(model_name='large_44k_v2', + model_path=Path('./weights/mmaudio_large_44k_v2.pth'), + 
vae_path=Path('./ext_weights/v1-44.pth'), + bigvgan_16k_path=None, + mode='44k') +all_model_cfg: dict[str, ModelConfig] = { + 'small_16k': small_16k, + 'small_44k': small_44k, + 'medium_44k': medium_44k, + 'large_44k': large_44k, + 'large_44k_v2': large_44k_v2, +} + + +def generate( + clip_video: Optional[torch.Tensor], + sync_video: Optional[torch.Tensor], + text: Optional[list[str]], + *, + negative_text: Optional[list[str]] = None, + feature_utils: FeaturesUtils, + net: MMAudio, + fm: FlowMatching, + rng: torch.Generator, + cfg_strength: float, + clip_batch_size_multiplier: int = 40, + sync_batch_size_multiplier: int = 40, +) -> torch.Tensor: + device = feature_utils.device + dtype = feature_utils.dtype + + bs = len(text) + if clip_video is not None: + clip_video = clip_video.to(device, dtype, non_blocking=True) + clip_features = feature_utils.encode_video_with_clip(clip_video, + batch_size=bs * + clip_batch_size_multiplier) + else: + clip_features = net.get_empty_clip_sequence(bs) + + if sync_video is not None: + sync_video = sync_video.to(device, dtype, non_blocking=True) + sync_features = feature_utils.encode_video_with_sync(sync_video, + batch_size=bs * + sync_batch_size_multiplier) + else: + sync_features = net.get_empty_sync_sequence(bs) + + if text is not None: + text_features = feature_utils.encode_text(text) + else: + text_features = net.get_empty_string_sequence(bs) + + if negative_text is not None: + assert len(negative_text) == bs + negative_text_features = feature_utils.encode_text(negative_text) + else: + negative_text_features = net.get_empty_string_sequence(bs) + + x0 = torch.randn(bs, + net.latent_seq_len, + net.latent_dim, + device=device, + dtype=dtype, + generator=rng) + preprocessed_conditions = net.preprocess_conditions(clip_features, sync_features, text_features) + empty_conditions = net.get_empty_conditions( + bs, negative_text_features=negative_text_features if negative_text is not None else None) + + cfg_ode_wrapper = lambda t, x: net.ode_wrapper(t, x, preprocessed_conditions, empty_conditions, + cfg_strength) + x1 = fm.to_data(cfg_ode_wrapper, x0) + x1 = net.unnormalize(x1) + spec = feature_utils.decode(x1) + audio = feature_utils.vocode(spec) + return audio + + +LOGFORMAT = " %(log_color)s%(levelname)-8s%(reset)s | %(log_color)s%(message)s%(reset)s" + + +def setup_eval_logging(log_level: int = logging.INFO): + logging.root.setLevel(log_level) + formatter = ColoredFormatter(LOGFORMAT) + stream = logging.StreamHandler() + stream.setLevel(log_level) + stream.setFormatter(formatter) + log = logging.getLogger() + log.setLevel(log_level) + log.addHandler(stream) + + +def load_video(video_path: Path, duration_sec: float, load_all_frames: bool = True) -> VideoInfo: + _CLIP_SIZE = 384 + _CLIP_FPS = 8.0 + + _SYNC_SIZE = 224 + _SYNC_FPS = 25.0 + + clip_transform = v2.Compose([ + v2.Resize((_CLIP_SIZE, _CLIP_SIZE), interpolation=v2.InterpolationMode.BICUBIC), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + ]) + + sync_transform = v2.Compose([ + v2.Resize(_SYNC_SIZE, interpolation=v2.InterpolationMode.BICUBIC), + v2.CenterCrop(_SYNC_SIZE), + v2.ToImage(), + v2.ToDtype(torch.float32, scale=True), + v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]), + ]) + + output_frames, all_frames, orig_fps = read_frames(video_path, + list_of_fps=[_CLIP_FPS, _SYNC_FPS], + start_sec=0, + end_sec=duration_sec, + need_all_frames=load_all_frames) + + clip_chunk, sync_chunk = output_frames + clip_chunk = torch.from_numpy(clip_chunk).permute(0, 3, 1, 2) + sync_chunk = 
torch.from_numpy(sync_chunk).permute(0, 3, 1, 2) + + clip_frames = clip_transform(clip_chunk) + sync_frames = sync_transform(sync_chunk) + + clip_length_sec = clip_frames.shape[0] / _CLIP_FPS + sync_length_sec = sync_frames.shape[0] / _SYNC_FPS + + if clip_length_sec < duration_sec: + log.warning(f'Clip video is too short: {clip_length_sec:.2f} < {duration_sec:.2f}') + log.warning(f'Truncating to {clip_length_sec:.2f} sec') + duration_sec = clip_length_sec + + if sync_length_sec < duration_sec: + log.warning(f'Sync video is too short: {sync_length_sec:.2f} < {duration_sec:.2f}') + log.warning(f'Truncating to {sync_length_sec:.2f} sec') + duration_sec = sync_length_sec + + clip_frames = clip_frames[:int(_CLIP_FPS * duration_sec)] + sync_frames = sync_frames[:int(_SYNC_FPS * duration_sec)] + + video_info = VideoInfo( + duration_sec=duration_sec, + fps=orig_fps, + clip_frames=clip_frames, + sync_frames=sync_frames, + all_frames=all_frames if load_all_frames else None, + ) + return video_info + + +def make_video(video_info: VideoInfo, output_path: Path, audio: torch.Tensor, sampling_rate: int): + reencode_with_audio(video_info, output_path, audio, sampling_rate) diff --git a/mmaudio/ext/__init__.py b/mmaudio/ext/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/mmaudio/ext/__init__.py @@ -0,0 +1 @@ + diff --git a/mmaudio/ext/autoencoder/__init__.py b/mmaudio/ext/autoencoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e5a876391c1e48970e93ff45f212f21f86d4d0c9 --- /dev/null +++ b/mmaudio/ext/autoencoder/__init__.py @@ -0,0 +1 @@ +from .autoencoder import AutoEncoderModule diff --git a/mmaudio/ext/autoencoder/autoencoder.py b/mmaudio/ext/autoencoder/autoencoder.py new file mode 100644 index 0000000000000000000000000000000000000000..5b444656112f9c4e5d9493c8fce40c118a2e31d5 --- /dev/null +++ b/mmaudio/ext/autoencoder/autoencoder.py @@ -0,0 +1,52 @@ +from typing import Literal, Optional + +import torch +import torch.nn as nn + +from mmaudio.ext.autoencoder.vae import VAE, get_my_vae +from mmaudio.ext.bigvgan import BigVGAN +from mmaudio.ext.bigvgan_v2.bigvgan import BigVGAN as BigVGANv2 +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + + +class AutoEncoderModule(nn.Module): + + def __init__(self, + *, + vae_ckpt_path, + vocoder_ckpt_path: Optional[str] = None, + mode: Literal['16k', '44k'], + need_vae_encoder: bool = True): + super().__init__() + self.vae: VAE = get_my_vae(mode).eval() + vae_state_dict = torch.load(vae_ckpt_path, weights_only=True, map_location='cpu') + self.vae.load_state_dict(vae_state_dict, strict=False) + self.vae.remove_weight_norm() + + if mode == '16k': + assert vocoder_ckpt_path is not None + self.vocoder = BigVGAN(vocoder_ckpt_path).eval() + elif mode == '44k': + self.vocoder = BigVGANv2.from_pretrained('nvidia/bigvgan_v2_44khz_128band_512x', + use_cuda_kernel=False) + self.vocoder.remove_weight_norm() + else: + raise ValueError(f'Unknown mode: {mode}') + + for param in self.parameters(): + param.requires_grad = False + + if not need_vae_encoder: + del self.vae.encoder + + @torch.inference_mode() + def encode(self, x: torch.Tensor) -> DiagonalGaussianDistribution: + return self.vae.encode(x) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + return self.vae.decode(z) + + @torch.inference_mode() + def vocode(self, spec: torch.Tensor) -> torch.Tensor: + return self.vocoder(spec) diff --git 
a/mmaudio/ext/autoencoder/edm2_utils.py b/mmaudio/ext/autoencoder/edm2_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a18ffba5cc42214fddf1300034be2eff2760025c --- /dev/null +++ b/mmaudio/ext/autoencoder/edm2_utils.py @@ -0,0 +1,168 @@ +# Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# +# This work is licensed under a Creative Commons +# Attribution-NonCommercial-ShareAlike 4.0 International License. +# You should have received a copy of the license along with this +# work. If not, see http://creativecommons.org/licenses/by-nc-sa/4.0/ +"""Improved diffusion model architecture proposed in the paper +"Analyzing and Improving the Training Dynamics of Diffusion Models".""" + +import numpy as np +import torch + +#---------------------------------------------------------------------------- +# Variant of constant() that inherits dtype and device from the given +# reference tensor by default. + +_constant_cache = dict() + + +def constant(value, shape=None, dtype=None, device=None, memory_format=None): + value = np.asarray(value) + if shape is not None: + shape = tuple(shape) + if dtype is None: + dtype = torch.get_default_dtype() + if device is None: + device = torch.device('cpu') + if memory_format is None: + memory_format = torch.contiguous_format + + key = (value.shape, value.dtype, value.tobytes(), shape, dtype, device, memory_format) + tensor = _constant_cache.get(key, None) + if tensor is None: + tensor = torch.as_tensor(value.copy(), dtype=dtype, device=device) + if shape is not None: + tensor, _ = torch.broadcast_tensors(tensor, torch.empty(shape)) + tensor = tensor.contiguous(memory_format=memory_format) + _constant_cache[key] = tensor + return tensor + + +def const_like(ref, value, shape=None, dtype=None, device=None, memory_format=None): + if dtype is None: + dtype = ref.dtype + if device is None: + device = ref.device + return constant(value, shape=shape, dtype=dtype, device=device, memory_format=memory_format) + + +#---------------------------------------------------------------------------- +# Normalize given tensor to unit magnitude with respect to the given +# dimensions. Default = all dimensions except the first. + + +def normalize(x, dim=None, eps=1e-4): + if dim is None: + dim = list(range(1, x.ndim)) + norm = torch.linalg.vector_norm(x, dim=dim, keepdim=True, dtype=torch.float32) + norm = torch.add(eps, norm, alpha=np.sqrt(norm.numel() / x.numel())) + return x / norm.to(x.dtype) + + +class Normalize(torch.nn.Module): + + def __init__(self, dim=None, eps=1e-4): + super().__init__() + self.dim = dim + self.eps = eps + + def forward(self, x): + return normalize(x, dim=self.dim, eps=self.eps) + + +#---------------------------------------------------------------------------- +# Upsample or downsample the given tensor with the given filter, +# or keep it as is. + + +def resample(x, f=[1, 1], mode='keep'): + if mode == 'keep': + return x + f = np.float32(f) + assert f.ndim == 1 and len(f) % 2 == 0 + pad = (len(f) - 1) // 2 + f = f / f.sum() + f = np.outer(f, f)[np.newaxis, np.newaxis, :, :] + f = const_like(x, f) + c = x.shape[1] + if mode == 'down': + return torch.nn.functional.conv2d(x, + f.tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + assert mode == 'up' + return torch.nn.functional.conv_transpose2d(x, (f * 4).tile([c, 1, 1, 1]), + groups=c, + stride=2, + padding=(pad, )) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving SiLU (Equation 81). 
+ + +def mp_silu(x): + return torch.nn.functional.silu(x) / 0.596 + + +class MPSiLU(torch.nn.Module): + + def forward(self, x): + return mp_silu(x) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving sum (Equation 88). + + +def mp_sum(a, b, t=0.5): + return a.lerp(b, t) / np.sqrt((1 - t)**2 + t**2) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving concatenation (Equation 103). + + +def mp_cat(a, b, dim=1, t=0.5): + Na = a.shape[dim] + Nb = b.shape[dim] + C = np.sqrt((Na + Nb) / ((1 - t)**2 + t**2)) + wa = C / np.sqrt(Na) * (1 - t) + wb = C / np.sqrt(Nb) * t + return torch.cat([wa * a, wb * b], dim=dim) + + +#---------------------------------------------------------------------------- +# Magnitude-preserving convolution or fully-connected layer (Equation 47) +# with force weight normalization (Equation 66). + + +class MPConv1D(torch.nn.Module): + + def __init__(self, in_channels, out_channels, kernel_size): + super().__init__() + self.out_channels = out_channels + self.weight = torch.nn.Parameter(torch.randn(out_channels, in_channels, kernel_size)) + + self.weight_norm_removed = False + + def forward(self, x, gain=1): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + + w = self.weight * gain + if w.ndim == 2: + return x @ w.t() + assert w.ndim == 3 + return torch.nn.functional.conv1d(x, w, padding=(w.shape[-1] // 2, )) + + def remove_weight_norm(self): + w = self.weight.to(torch.float32) + w = normalize(w) # traditional weight normalization + w = w / np.sqrt(w[0].numel()) + w = w.to(self.weight.dtype) + self.weight.data.copy_(w) + + self.weight_norm_removed = True + return self diff --git a/mmaudio/ext/autoencoder/vae.py b/mmaudio/ext/autoencoder/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..204c2e01cf9fc89eb718f8aa266a1c6a7e443312 --- /dev/null +++ b/mmaudio/ext/autoencoder/vae.py @@ -0,0 +1,373 @@ +import logging +from typing import Optional + +import torch +import torch.nn as nn + +from mmaudio.ext.autoencoder.edm2_utils import MPConv1D +from mmaudio.ext.autoencoder.vae_modules import (AttnBlock1D, Downsample1D, ResnetBlock1D, + Upsample1D, nonlinearity) +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + +log = logging.getLogger() + +DATA_MEAN_80D = [ + -1.6058, -1.3676, -1.2520, -1.2453, -1.2078, -1.2224, -1.2419, -1.2439, -1.2922, -1.2927, + -1.3170, -1.3543, -1.3401, -1.3836, -1.3907, -1.3912, -1.4313, -1.4152, -1.4527, -1.4728, + -1.4568, -1.5101, -1.5051, -1.5172, -1.5623, -1.5373, -1.5746, -1.5687, -1.6032, -1.6131, + -1.6081, -1.6331, -1.6489, -1.6489, -1.6700, -1.6738, -1.6953, -1.6969, -1.7048, -1.7280, + -1.7361, -1.7495, -1.7658, -1.7814, -1.7889, -1.8064, -1.8221, -1.8377, -1.8417, -1.8643, + -1.8857, -1.8929, -1.9173, -1.9379, -1.9531, -1.9673, -1.9824, -2.0042, -2.0215, -2.0436, + -2.0766, -2.1064, -2.1418, -2.1855, -2.2319, -2.2767, -2.3161, -2.3572, -2.3954, -2.4282, + -2.4659, -2.5072, -2.5552, -2.6074, -2.6584, -2.7107, -2.7634, -2.8266, -2.8981, -2.9673 +] + +DATA_STD_80D = [ + 1.0291, 1.0411, 1.0043, 0.9820, 0.9677, 0.9543, 0.9450, 0.9392, 0.9343, 0.9297, 0.9276, 0.9263, + 0.9242, 0.9254, 0.9232, 0.9281, 0.9263, 0.9315, 0.9274, 0.9247, 0.9277, 0.9199, 0.9188, 0.9194, + 0.9160, 0.9161, 0.9146, 0.9161, 0.9100, 0.9095, 0.9145, 0.9076, 0.9066, 0.9095, 0.9032, 0.9043, + 0.9038, 0.9011, 0.9019, 0.9010, 0.8984, 0.8983, 0.8986, 0.8961, 0.8962, 0.8978, 0.8962, 0.8973, + 0.8993, 
0.8976, 0.8995, 0.9016, 0.8982, 0.8972, 0.8974, 0.8949, 0.8940, 0.8947, 0.8936, 0.8939, + 0.8951, 0.8956, 0.9017, 0.9167, 0.9436, 0.9690, 1.0003, 1.0225, 1.0381, 1.0491, 1.0545, 1.0604, + 1.0761, 1.0929, 1.1089, 1.1196, 1.1176, 1.1156, 1.1117, 1.1070 +] + +DATA_MEAN_128D = [ + -3.3462, -2.6723, -2.4893, -2.3143, -2.2664, -2.3317, -2.1802, -2.4006, -2.2357, -2.4597, + -2.3717, -2.4690, -2.5142, -2.4919, -2.6610, -2.5047, -2.7483, -2.5926, -2.7462, -2.7033, + -2.7386, -2.8112, -2.7502, -2.9594, -2.7473, -3.0035, -2.8891, -2.9922, -2.9856, -3.0157, + -3.1191, -2.9893, -3.1718, -3.0745, -3.1879, -3.2310, -3.1424, -3.2296, -3.2791, -3.2782, + -3.2756, -3.3134, -3.3509, -3.3750, -3.3951, -3.3698, -3.4505, -3.4509, -3.5089, -3.4647, + -3.5536, -3.5788, -3.5867, -3.6036, -3.6400, -3.6747, -3.7072, -3.7279, -3.7283, -3.7795, + -3.8259, -3.8447, -3.8663, -3.9182, -3.9605, -3.9861, -4.0105, -4.0373, -4.0762, -4.1121, + -4.1488, -4.1874, -4.2461, -4.3170, -4.3639, -4.4452, -4.5282, -4.6297, -4.7019, -4.7960, + -4.8700, -4.9507, -5.0303, -5.0866, -5.1634, -5.2342, -5.3242, -5.4053, -5.4927, -5.5712, + -5.6464, -5.7052, -5.7619, -5.8410, -5.9188, -6.0103, -6.0955, -6.1673, -6.2362, -6.3120, + -6.3926, -6.4797, -6.5565, -6.6511, -6.8130, -6.9961, -7.1275, -7.2457, -7.3576, -7.4663, + -7.6136, -7.7469, -7.8815, -8.0132, -8.1515, -8.3071, -8.4722, -8.7418, -9.3975, -9.6628, + -9.7671, -9.8863, -9.9992, -10.0860, -10.1709, -10.5418, -11.2795, -11.3861 +] + +DATA_STD_128D = [ + 2.3804, 2.4368, 2.3772, 2.3145, 2.2803, 2.2510, 2.2316, 2.2083, 2.1996, 2.1835, 2.1769, 2.1659, + 2.1631, 2.1618, 2.1540, 2.1606, 2.1571, 2.1567, 2.1612, 2.1579, 2.1679, 2.1683, 2.1634, 2.1557, + 2.1668, 2.1518, 2.1415, 2.1449, 2.1406, 2.1350, 2.1313, 2.1415, 2.1281, 2.1352, 2.1219, 2.1182, + 2.1327, 2.1195, 2.1137, 2.1080, 2.1179, 2.1036, 2.1087, 2.1036, 2.1015, 2.1068, 2.0975, 2.0991, + 2.0902, 2.1015, 2.0857, 2.0920, 2.0893, 2.0897, 2.0910, 2.0881, 2.0925, 2.0873, 2.0960, 2.0900, + 2.0957, 2.0958, 2.0978, 2.0936, 2.0886, 2.0905, 2.0845, 2.0855, 2.0796, 2.0840, 2.0813, 2.0817, + 2.0838, 2.0840, 2.0917, 2.1061, 2.1431, 2.1976, 2.2482, 2.3055, 2.3700, 2.4088, 2.4372, 2.4609, + 2.4731, 2.4847, 2.5072, 2.5451, 2.5772, 2.6147, 2.6529, 2.6596, 2.6645, 2.6726, 2.6803, 2.6812, + 2.6899, 2.6916, 2.6931, 2.6998, 2.7062, 2.7262, 2.7222, 2.7158, 2.7041, 2.7485, 2.7491, 2.7451, + 2.7485, 2.7233, 2.7297, 2.7233, 2.7145, 2.6958, 2.6788, 2.6439, 2.6007, 2.4786, 2.2469, 2.1877, + 2.1392, 2.0717, 2.0107, 1.9676, 1.9140, 1.7102, 0.9101, 0.7164 +] + + +class VAE(nn.Module): + + def __init__( + self, + *, + data_dim: int, + embed_dim: int, + hidden_dim: int, + ): + super().__init__() + + if data_dim == 80: + # self.data_mean = torch.tensor(DATA_MEAN_80D, dtype=torch.float32).cuda() + # self.data_std = torch.tensor(DATA_STD_80D, dtype=torch.float32).cuda() + self.register_buffer('data_mean', torch.tensor(DATA_MEAN_80D, dtype=torch.float32)) + self.register_buffer('data_std', torch.tensor(DATA_STD_80D, dtype=torch.float32)) + elif data_dim == 128: + # torch.tensor(DATA_MEAN_128D, dtype=torch.float32).cuda() + # self.data_std = torch.tensor(DATA_STD_128D, dtype=torch.float32).cuda() + self.register_buffer('data_mean', torch.tensor(DATA_MEAN_128D, dtype=torch.float32)) + self.register_buffer('data_std', torch.tensor(DATA_STD_128D, dtype=torch.float32)) + + self.data_mean = self.data_mean.view(1, -1, 1) + self.data_std = self.data_std.view(1, -1, 1) + + self.encoder = Encoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + 
down_layers=[0], + in_dim=data_dim, + embed_dim=embed_dim, + ) + self.decoder = Decoder1D( + dim=hidden_dim, + ch_mult=(1, 2, 4), + num_res_blocks=2, + attn_layers=[3], + down_layers=[0], + in_dim=data_dim, + out_dim=data_dim, + embed_dim=embed_dim, + ) + + self.embed_dim = embed_dim + # self.quant_conv = nn.Conv1d(2 * embed_dim, 2 * embed_dim, 1) + # self.post_quant_conv = nn.Conv1d(embed_dim, embed_dim, 1) + + self.initialize_weights() + + def initialize_weights(self): + pass + + def encode(self, x: torch.Tensor, normalize: bool = True) -> DiagonalGaussianDistribution: + if normalize: + x = self.normalize(x) + moments = self.encoder(x) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z: torch.Tensor, unnormalize: bool = True) -> torch.Tensor: + dec = self.decoder(z) + if unnormalize: + dec = self.unnormalize(dec) + return dec + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + return (x - self.data_mean) / self.data_std + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + return x * self.data_std + self.data_mean + + def forward( + self, + x: torch.Tensor, + sample_posterior: bool = True, + rng: Optional[torch.Generator] = None, + normalize: bool = True, + unnormalize: bool = True, + ) -> tuple[torch.Tensor, DiagonalGaussianDistribution]: + + posterior = self.encode(x, normalize=normalize) + if sample_posterior: + z = posterior.sample(rng) + else: + z = posterior.mode() + dec = self.decode(z, unnormalize=unnormalize) + return dec, posterior + + def load_weights(self, src_dict) -> None: + self.load_state_dict(src_dict, strict=True) + + @property + def device(self) -> torch.device: + return next(self.parameters()).device + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def remove_weight_norm(self): + for name, m in self.named_modules(): + if isinstance(m, MPConv1D): + m.remove_weight_norm() + log.debug(f"Removed weight norm from {name}") + return self + + +class Encoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + double_z: bool = True, + kernel_size: int = 3, + clip_act: float = 256.0): + super().__init__() + self.dim = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = down_layers + self.attn_layers = attn_layers + self.conv_in = MPConv1D(in_dim, self.dim, kernel_size=kernel_size) + + in_ch_mult = (1, ) + tuple(ch_mult) + self.in_ch_mult = in_ch_mult + # downsampling + self.down = nn.ModuleList() + for i_level in range(self.num_layers): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = dim * in_ch_mult[i_level] + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append( + ResnetBlock1D(in_dim=block_in, + out_dim=block_out, + kernel_size=kernel_size, + use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level in down_layers: + down.downsample = Downsample1D(block_in, resamp_with_conv) + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = 
ResnetBlock1D(in_dim=block_in, + out_dim=block_in, + kernel_size=kernel_size, + use_norm=True) + + # end + self.conv_out = MPConv1D(block_in, + 2 * embed_dim if double_z else embed_dim, + kernel_size=kernel_size) + + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, x): + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_layers): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1]) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + hs.append(h) + if i_level in self.down_layers: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # end + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +class Decoder1D(nn.Module): + + def __init__(self, + *, + dim: int, + out_dim: int, + ch_mult: tuple[int] = (1, 2, 4, 8), + num_res_blocks: int, + attn_layers: list[int] = [], + down_layers: list[int] = [], + kernel_size: int = 3, + resamp_with_conv: bool = True, + in_dim: int, + embed_dim: int, + clip_act: float = 256.0): + super().__init__() + self.ch = dim + self.num_layers = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.in_channels = in_dim + self.clip_act = clip_act + self.down_layers = [i + 1 for i in down_layers] # each downlayer add one + + # compute in_ch_mult, block_in and curr_res at lowest res + block_in = dim * ch_mult[self.num_layers - 1] + + # z to block_in + self.conv_in = MPConv1D(embed_dim, block_in, kernel_size=kernel_size) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + self.mid.attn_1 = AttnBlock1D(block_in) + self.mid.block_2 = ResnetBlock1D(in_dim=block_in, out_dim=block_in, use_norm=True) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_layers)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = dim * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + block.append(ResnetBlock1D(in_dim=block_in, out_dim=block_out, use_norm=True)) + block_in = block_out + if i_level in attn_layers: + attn.append(AttnBlock1D(block_in)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level in self.down_layers: + up.upsample = Upsample1D(block_in, resamp_with_conv) + self.up.insert(0, up) # prepend to get consistent order + + # end + self.conv_out = MPConv1D(block_in, out_dim, kernel_size=kernel_size) + self.learnable_gain = nn.Parameter(torch.zeros([])) + + def forward(self, z): + # z to block_in + h = self.conv_in(z) + + # middle + h = self.mid.block_1(h) + h = self.mid.attn_1(h) + h = self.mid.block_2(h) + h = h.clamp(-self.clip_act, self.clip_act) + + # upsampling + for i_level in reversed(range(self.num_layers)): + for i_block in range(self.num_res_blocks + 1): + h = self.up[i_level].block[i_block](h) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + h = h.clamp(-self.clip_act, self.clip_act) + if i_level in self.down_layers: + h = self.up[i_level].upsample(h) + + h = nonlinearity(h) + h = self.conv_out(h, gain=(self.learnable_gain + 1)) + return h + + +def VAE_16k(**kwargs) -> VAE: + return VAE(data_dim=80, embed_dim=20, hidden_dim=384, **kwargs) + + +def VAE_44k(**kwargs) -> VAE: + return VAE(data_dim=128, embed_dim=40, hidden_dim=512, **kwargs) + + 
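+# Illustrative usage sketch (the batch shapes and dummy data are assumptions; the
+# calls themselves mirror the methods defined above). VAE_16k / VAE_44k wrap the
+# model with mel dimensions for the 16 kHz (80-bin) and 44.1 kHz (128-bin) pipelines.
+# A minimal round trip over mel spectrograms assumed to be laid out as (B, n_mels, T):
+#
+#     vae = get_my_vae('16k').eval()      # the factory below accepts '16k' or '44k'
+#     mel = torch.randn(2, 80, 512)       # dummy batch, assumed layout (B, C, T)
+#     with torch.no_grad():
+#         recon, posterior = vae(mel)     # forward() returns (decoded, posterior)
+#         z = posterior.mode()            # or posterior.sample() for a stochastic latent
+#         recon2 = vae.decode(z)          # back to the mel domain
+#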
+def get_my_vae(name: str, **kwargs) -> VAE: + if name == '16k': + return VAE_16k(**kwargs) + if name == '44k': + return VAE_44k(**kwargs) + raise ValueError(f'Unknown model: {name}') + + +if __name__ == '__main__': + network = get_my_vae('standard') + + # print the number of parameters in terms of millions + num_params = sum(p.numel() for p in network.parameters()) / 1e6 + print(f'Number of parameters: {num_params:.2f}M') diff --git a/mmaudio/ext/autoencoder/vae_modules.py b/mmaudio/ext/autoencoder/vae_modules.py new file mode 100644 index 0000000000000000000000000000000000000000..c59ff41e86303e518688fd3f56ade08f4550f2aa --- /dev/null +++ b/mmaudio/ext/autoencoder/vae_modules.py @@ -0,0 +1,117 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange + +from mmaudio.ext.autoencoder.edm2_utils import (MPConv1D, mp_silu, mp_sum, normalize) + + +def nonlinearity(x): + # swish + return mp_silu(x) + + +class ResnetBlock1D(nn.Module): + + def __init__(self, *, in_dim, out_dim=None, conv_shortcut=False, kernel_size=3, use_norm=True): + super().__init__() + self.in_dim = in_dim + out_dim = in_dim if out_dim is None else out_dim + self.out_dim = out_dim + self.use_conv_shortcut = conv_shortcut + self.use_norm = use_norm + + self.conv1 = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + self.conv2 = MPConv1D(out_dim, out_dim, kernel_size=kernel_size) + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + self.conv_shortcut = MPConv1D(in_dim, out_dim, kernel_size=kernel_size) + else: + self.nin_shortcut = MPConv1D(in_dim, out_dim, kernel_size=1) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + + # pixel norm + if self.use_norm: + x = normalize(x, dim=1) + + h = x + h = nonlinearity(h) + h = self.conv1(h) + + h = nonlinearity(h) + h = self.conv2(h) + + if self.in_dim != self.out_dim: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return mp_sum(x, h, t=0.3) + + +class AttnBlock1D(nn.Module): + + def __init__(self, in_channels, num_heads=1): + super().__init__() + self.in_channels = in_channels + + self.num_heads = num_heads + self.qkv = MPConv1D(in_channels, in_channels * 3, kernel_size=1) + self.proj_out = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + h = x + y = self.qkv(h) + y = y.reshape(y.shape[0], self.num_heads, -1, 3, y.shape[-1]) + q, k, v = normalize(y, dim=2).unbind(3) + + q = rearrange(q, 'b h c l -> b h l c') + k = rearrange(k, 'b h c l -> b h l c') + v = rearrange(v, 'b h c l -> b h l c') + + h = F.scaled_dot_product_attention(q, k, v) + h = rearrange(h, 'b h l c -> b (h c) l') + + h = self.proj_out(h) + + return mp_sum(x, h, t=0.3) + + +class Upsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = MPConv1D(in_channels, in_channels, kernel_size=3) + + def forward(self, x): + x = F.interpolate(x, scale_factor=2.0, mode='nearest-exact') # support 3D tensor(B,C,T) + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample1D(nn.Module): + + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv1 = MPConv1D(in_channels, in_channels, kernel_size=1) + self.conv2 = MPConv1D(in_channels, in_channels, kernel_size=1) + + def forward(self, x): + + if self.with_conv: + x = self.conv1(x) + + x = F.avg_pool1d(x, 
kernel_size=2, stride=2) + + if self.with_conv: + x = self.conv2(x) + + return x diff --git a/mmaudio/ext/bigvgan/LICENSE b/mmaudio/ext/bigvgan/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e9663595cc28938f88d6299acd3ba791542e4c0c --- /dev/null +++ b/mmaudio/ext/bigvgan/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2022 NVIDIA CORPORATION. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/__init__.py b/mmaudio/ext/bigvgan/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..00f13e9bf9ccb0b4ec37e1c70869f9a9a538871f --- /dev/null +++ b/mmaudio/ext/bigvgan/__init__.py @@ -0,0 +1 @@ +from .bigvgan import BigVGAN diff --git a/mmaudio/ext/bigvgan/activations.py b/mmaudio/ext/bigvgan/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..61f2808a5466b3cf4d041059700993af5527dd29 --- /dev/null +++ b/mmaudio/ext/bigvgan/activations.py @@ -0,0 +1,120 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. + ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. 
+ Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + ''' + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + ''' + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/__init__.py b/mmaudio/ext/bigvgan/alias_free_torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a2318b63198250856809c0cb46210a4147b829bc --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/act.py b/mmaudio/ext/bigvgan/alias_free_torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..028debd697dd60458aae75010057df038bd3518a --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/act.py @@ -0,0 +1,28 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
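+# Activation1d (defined below) applies its wrapped nonlinearity at a higher sample
+# rate: the input is upsampled, the pointwise activation is applied, and the result
+# is low-pass filtered back down, suppressing the aliasing that the nonlinearity
+# would otherwise introduce into the band-limited signal.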
+ +import torch.nn as nn +from .resample import UpSample1d, DownSample1d + + +class Activation1d(nn.Module): + def __init__(self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/filter.py b/mmaudio/ext/bigvgan/alias_free_torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..7ad6ea87c1f10ddd94c544037791d7a4634d5ae1 --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/filter.py @@ -0,0 +1,95 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if 'sinc' in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where(x == 0, + torch.tensor(1., device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d(cutoff, half_width, kernel_size): # return filter [1,1,kernel_size] + even = (kernel_size % 2 == 0) + half_size = kernel_size // 2 + + #For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.: + beta = 0.1102 * (A - 8.7) + elif A >= 21.: + beta = 0.5842 * (A - 21)**0.4 + 0.07886 * (A - 21.) + else: + beta = 0. + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = (torch.arange(-half_size, half_size) + 0.5) + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + # Normalize filter to have sum = 1, otherwise we will have a small leakage + # of the constant component in the input signal. + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__(self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = 'replicate', + kernel_size: int = 12): + # kernel_size should be even number for stylegan3 setup, + # in this implementation, odd number is also possible. 
+ super().__init__() + if cutoff < -0.: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = (kernel_size % 2 == 0) + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + #input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), + mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), + stride=self.stride, groups=C) + + return out \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/alias_free_torch/resample.py b/mmaudio/ext/bigvgan/alias_free_torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..750e6c3402cc5ac939c4b9d075246562e0e1d1a7 --- /dev/null +++ b/mmaudio/ext/bigvgan/alias_free_torch/resample.py @@ -0,0 +1,49 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn +from torch.nn import functional as F +from .filter import LowPassFilter1d +from .filter import kaiser_sinc_filter1d + + +class UpSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2 + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode='replicate') + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size + self.lowpass = LowPassFilter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size) + + def forward(self, x): + xx = self.lowpass(x) + + return xx \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/bigvgan.py b/mmaudio/ext/bigvgan/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..032ea1d03e96165571c9ae22d66e00911a605870 --- /dev/null +++ b/mmaudio/ext/bigvgan/bigvgan.py @@ -0,0 +1,32 @@ +from pathlib import Path + +import torch +import torch.nn as nn +from omegaconf import OmegaConf + +from mmaudio.ext.bigvgan.models import BigVGANVocoder + +_bigvgan_vocoder_path = Path(__file__).parent / 'bigvgan_vocoder.yml' + + +class BigVGAN(nn.Module): + + def __init__(self, ckpt_path, config_path=_bigvgan_vocoder_path): + super().__init__() + vocoder_cfg = OmegaConf.load(config_path) + self.vocoder = BigVGANVocoder(vocoder_cfg).eval() + vocoder_ckpt = torch.load(ckpt_path, map_location='cpu', weights_only=True)['generator'] + self.vocoder.load_state_dict(vocoder_ckpt) + + 
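+ # Weight norm is folded into the plain convolution weights right after loading;
+ # forward() asserts on this flag, so this wrapper is intended for inference only.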
self.weight_norm_removed = False + self.remove_weight_norm() + + @torch.inference_mode() + def forward(self, x): + assert self.weight_norm_removed, 'call remove_weight_norm() before inference' + return self.vocoder(x) + + def remove_weight_norm(self): + self.vocoder.remove_weight_norm() + self.weight_norm_removed = True + return self diff --git a/mmaudio/ext/bigvgan/bigvgan_vocoder.yml b/mmaudio/ext/bigvgan/bigvgan_vocoder.yml new file mode 100644 index 0000000000000000000000000000000000000000..d4db31ec45336e757d94d5099ed16cb3c906c24a --- /dev/null +++ b/mmaudio/ext/bigvgan/bigvgan_vocoder.yml @@ -0,0 +1,63 @@ +resblock: '1' +num_gpus: 0 +batch_size: 64 +num_mels: 80 +learning_rate: 0.0001 +adam_b1: 0.8 +adam_b2: 0.99 +lr_decay: 0.999 +seed: 1234 +upsample_rates: +- 4 +- 4 +- 2 +- 2 +- 2 +- 2 +upsample_kernel_sizes: +- 8 +- 8 +- 4 +- 4 +- 4 +- 4 +upsample_initial_channel: 1536 +resblock_kernel_sizes: +- 3 +- 7 +- 11 +resblock_dilation_sizes: +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +- - 1 + - 3 + - 5 +activation: snakebeta +snake_logscale: true +resolutions: +- - 1024 + - 120 + - 600 +- - 2048 + - 240 + - 1200 +- - 512 + - 50 + - 240 +mpd_reshapes: +- 2 +- 3 +- 5 +- 7 +- 11 +use_spectral_norm: false +discriminator_channel_mult: 1 +num_workers: 4 +dist_config: + dist_backend: nccl + dist_url: tcp://localhost:54341 + world_size: 1 diff --git a/mmaudio/ext/bigvgan/env.py b/mmaudio/ext/bigvgan/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/mmaudio/ext/bigvgan/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. 
Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/mmaudio/ext/bigvgan/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan/models.py b/mmaudio/ext/bigvgan/models.py new file mode 100644 index 0000000000000000000000000000000000000000..36938e659ebc0e4cb045f10e4893525907c2d1f7 --- /dev/null +++ b/mmaudio/ext/bigvgan/models.py @@ -0,0 +1,255 @@ +# Copyright (c) 2022 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
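+# Module overview: AMPBlock1/AMPBlock2 below are residual stacks of dilated Conv1d
+# layers whose Snake/SnakeBeta activations are wrapped in the anti-aliased
+# Activation1d; BigVGANVocoder chains transposed-convolution upsamplers with these
+# blocks to map a mel spectrogram (num_mels channels) to a single-channel waveform.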
+ +import torch +import torch.nn as nn +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from mmaudio.ext.bigvgan import activations +from mmaudio.ext.bigvgan.alias_free_torch import * +from mmaudio.ext.bigvgan.utils import get_padding, init_weights + +LRELU_SLOPE = 0.1 + + +class AMPBlock1(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5), activation=None): + super(AMPBlock1, self).__init__() + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." 
+ ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3), activation=None): + super(AMPBlock2, self).__init__() + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d(channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # total number of conv layers + + if activation == 'snake': # periodic nonlinearity with snake function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == 'snakebeta': # periodic nonlinearity with snakebeta function and anti-aliasing + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_parametrizations(l, 'weight') + + +class BigVGANVocoder(torch.nn.Module): + # this is our main BigVGAN model. Applies anti-aliased periodic activation for resblocks. + def __init__(self, h): + super().__init__() + self.h = h + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # pre conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + resblock = AMPBlock1 if h.resblock == '1' else AMPBlock2 + + # transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d(h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2)) + ])) + + # residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d, activation=h.activation)) + + # post conv + if h.activation == "snake": # periodic nonlinearity with snake function and anti-aliasing + activation_post = activations.Snake(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + elif h.activation == "snakebeta": # periodic nonlinearity with snakebeta function and anti-aliasing + activation_post = activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + self.activation_post = Activation1d(activation=activation_post) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + + # weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + # pre conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # post conv + x = self.activation_post(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') diff --git a/mmaudio/ext/bigvgan/utils.py b/mmaudio/ext/bigvgan/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..aff7e653533d3390756c53a0215801b06cc924b5 --- /dev/null +++ b/mmaudio/ext/bigvgan/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
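+# Small helpers shared by the vocoder: init_weights (normal init for conv layers),
+# apply_weight_norm, get_padding ("same" padding for a dilated convolution), and
+# load_checkpoint (torch.load onto the given device).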
+ +import os + +import torch +from torch.nn.utils.parametrizations import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print("Loading '{}'".format(filepath)) + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/mmaudio/ext/bigvgan_v2/LICENSE b/mmaudio/ext/bigvgan_v2/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..4c78361c86d4f685117d60d6623e2197fcfed706 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 NVIDIA CORPORATION. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/mmaudio/ext/bigvgan_v2/__init__.py b/mmaudio/ext/bigvgan_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/ext/bigvgan_v2/activations.py b/mmaudio/ext/bigvgan_v2/activations.py new file mode 100644 index 0000000000000000000000000000000000000000..4f08ddab5b55d6dcaf3e968af98889e0770c44f5 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/activations.py @@ -0,0 +1,126 @@ +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Snake(nn.Module): + """ + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. 
+ """ + super(Snake, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__( + self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False + ): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. + """ + super(SnakeBeta, self).__init__() + self.in_features = in_features + + # Initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # Log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + self.beta = Parameter(torch.zeros(in_features) * alpha) + else: # Linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + self.beta = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. 
+ SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # Line up with x to [B, C, T] + beta = self.beta.unsqueeze(0).unsqueeze(-1) + if self.alpha_logscale: + alpha = torch.exp(alpha) + beta = torch.exp(beta) + x = x + (1.0 / (beta + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py new file mode 100644 index 0000000000000000000000000000000000000000..fbc0fd8f28a37ad949fbdb9832f51b5b933c6ff2 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/activation1d.py @@ -0,0 +1,77 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import torch +import torch.nn as nn +from alias_free_activation.torch.resample import UpSample1d, DownSample1d + +# load fused CUDA kernel: this enables importing anti_alias_activation_cuda +from alias_free_activation.cuda import load + +anti_alias_activation_cuda = load.load() + + +class FusedAntiAliasActivation(torch.autograd.Function): + """ + Assumes filter size 12, replication padding on upsampling/downsampling, and logscale alpha/beta parameters as inputs. + The hyperparameters are hard-coded in the kernel to maximize speed. + NOTE: The fused kenrel is incorrect for Activation1d with different hyperparameters. + """ + + @staticmethod + def forward(ctx, inputs, up_ftr, down_ftr, alpha, beta): + activation_results = anti_alias_activation_cuda.forward( + inputs, up_ftr, down_ftr, alpha, beta + ) + + return activation_results + + @staticmethod + def backward(ctx, output_grads): + raise NotImplementedError + return output_grads, None, None + + +class Activation1d(nn.Module): + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + fused: bool = True, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + self.fused = fused # Whether to use fused CUDA kernel or not + + def forward(self, x): + if not self.fused: + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + return x + else: + if self.act.__class__.__name__ == "Snake": + beta = self.act.alpha.data # Snake uses same params for alpha and beta + else: + beta = ( + self.act.beta.data + ) # Snakebeta uses different params for alpha and beta + alpha = self.act.alpha.data + if ( + not self.act.alpha_logscale + ): # Exp baked into cuda kernel, cancel it out with a log + alpha = torch.log(alpha) + beta = torch.log(beta) + + x = FusedAntiAliasActivation.apply( + x, self.upsample.filter, self.downsample.lowpass.filter, alpha, beta + ) + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c5651f77143bd678169eb11564a7cf7a7969a59e --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation.cpp @@ -0,0 +1,23 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA 
CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + #include + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward", &fwd_cuda, "Anti-Alias Activation forward (CUDA)"); +} \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..8c442334869fe72d639ec203fa4fac07f96a0ee1 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/anti_alias_activation_cuda.cu @@ -0,0 +1,246 @@ +/* coding=utf-8 + * Copyright (c) 2024, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include "type_shim.h" +#include +#include +#include +#include +#include + +namespace +{ + // Hard-coded hyperparameters + // WARP_SIZE and WARP_BATCH must match the return values batches_per_warp and + constexpr int ELEMENTS_PER_LDG_STG = 1; //(WARP_ITERATIONS < 4) ? 
1 : 4; + constexpr int BUFFER_SIZE = 32; + constexpr int FILTER_SIZE = 12; + constexpr int HALF_FILTER_SIZE = 6; + constexpr int UPSAMPLE_REPLICATION_PAD = 5; // 5 on each side, matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_LEFT = 5; // matching torch impl + constexpr int DOWNSAMPLE_REPLICATION_PAD_RIGHT = 6; // matching torch impl + + template + __global__ void anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + // Up and downsample filters + input_t up_filter[FILTER_SIZE]; + input_t down_filter[FILTER_SIZE]; + + // Load data from global memory including extra indices reserved for replication paddings + input_t elements[2 * FILTER_SIZE + 2 * BUFFER_SIZE + 2 * UPSAMPLE_REPLICATION_PAD] = {0}; + input_t intermediates[2 * FILTER_SIZE + 2 * BUFFER_SIZE + DOWNSAMPLE_REPLICATION_PAD_LEFT + DOWNSAMPLE_REPLICATION_PAD_RIGHT] = {0}; + + // Output stores downsampled output before writing to dst + output_t output[BUFFER_SIZE]; + + // blockDim/threadIdx = (128, 1, 1) + // gridDim/blockIdx = (seq_blocks, channels, batches) + int block_offset = (blockIdx.x * 128 * BUFFER_SIZE + seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + int local_offset = threadIdx.x * BUFFER_SIZE; + int seq_offset = blockIdx.x * 128 * BUFFER_SIZE + local_offset; + + // intermediate have double the seq_len + int intermediate_local_offset = threadIdx.x * BUFFER_SIZE * 2; + int intermediate_seq_offset = blockIdx.x * 128 * BUFFER_SIZE * 2 + intermediate_local_offset; + + // Get values needed for replication padding before moving pointer + const input_t *right_most_pntr = src + (seq_len * (blockIdx.y + gridDim.y * blockIdx.z)); + input_t seq_left_most_value = right_most_pntr[0]; + input_t seq_right_most_value = right_most_pntr[seq_len - 1]; + + // Move src and dst pointers + src += block_offset + local_offset; + dst += block_offset + local_offset; + + // Alpha and beta values for snake activatons. Applies exp by default + alpha = alpha + blockIdx.y; + input_t alpha_val = expf(alpha[0]); + beta = beta + blockIdx.y; + input_t beta_val = expf(beta[0]); + + #pragma unroll + for (int it = 0; it < FILTER_SIZE; it += 1) + { + up_filter[it] = up_ftr[it]; + down_filter[it] = down_ftr[it]; + } + + // Apply replication padding for upsampling, matching torch impl + #pragma unroll + for (int it = -HALF_FILTER_SIZE; it < BUFFER_SIZE + HALF_FILTER_SIZE; it += 1) + { + int element_index = seq_offset + it; // index for element + if ((element_index < 0) && (element_index >= -UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_left_most_value; + } + if ((element_index >= seq_len) && (element_index < seq_len + UPSAMPLE_REPLICATION_PAD)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * seq_right_most_value; + } + if ((element_index >= 0) && (element_index < seq_len)) + { + elements[2 * (HALF_FILTER_SIZE + it)] = 2 * src[it]; + } + } + + // Apply upsampling strided convolution and write to intermediates. 
It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT for replication padding of the downsampilng conv later + #pragma unroll + for (int it = 0; it < (2 * BUFFER_SIZE + 2 * FILTER_SIZE); it += 1) + { + input_t acc = 0.0; + int element_index = intermediate_seq_offset + it; // index for intermediate + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + if ((element_index + f_idx) >= 0) + { + acc += up_filter[f_idx] * elements[it + f_idx]; + } + } + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] = acc; + } + + // Apply activation function. It reserves DOWNSAMPLE_REPLICATION_PAD_LEFT and DOWNSAMPLE_REPLICATION_PAD_RIGHT for replication padding of the downsampilng conv later + double no_div_by_zero = 0.000000001; + #pragma unroll + for (int it = 0; it < 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it += 1) + { + intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] += (1.0 / (beta_val + no_div_by_zero)) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val) * sinf(intermediates[it + DOWNSAMPLE_REPLICATION_PAD_LEFT] * alpha_val); + } + + // Apply replication padding before downsampling conv from intermediates + #pragma unroll + for (int it = 0; it < DOWNSAMPLE_REPLICATION_PAD_LEFT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT]; + } + #pragma unroll + for (int it = DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE; it < DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE + DOWNSAMPLE_REPLICATION_PAD_RIGHT; it += 1) + { + intermediates[it] = intermediates[DOWNSAMPLE_REPLICATION_PAD_LEFT + 2 * BUFFER_SIZE + 2 * FILTER_SIZE - 1]; + } + + // Apply downsample strided convolution (assuming stride=2) from intermediates + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += 1) + { + input_t acc = 0.0; + #pragma unroll + for (int f_idx = 0; f_idx < FILTER_SIZE; f_idx += 1) + { + // Add constant DOWNSAMPLE_REPLICATION_PAD_RIGHT to match torch implementation + acc += down_filter[f_idx] * intermediates[it * 2 + f_idx + DOWNSAMPLE_REPLICATION_PAD_RIGHT]; + } + output[it] = acc; + } + + // Write output to dst + #pragma unroll + for (int it = 0; it < BUFFER_SIZE; it += ELEMENTS_PER_LDG_STG) + { + int element_index = seq_offset + it; + if (element_index < seq_len) + { + dst[it] = output[it]; + } + } + + } + + template + void dispatch_anti_alias_activation_forward( + output_t *dst, + const input_t *src, + const input_t *up_ftr, + const input_t *down_ftr, + const input_t *alpha, + const input_t *beta, + int batch_size, + int channels, + int seq_len) + { + if (seq_len == 0) + { + return; + } + else + { + // Use 128 threads per block to maximimize gpu utilization + constexpr int threads_per_block = 128; + constexpr int seq_len_per_block = 4096; + int blocks_per_seq_len = (seq_len + seq_len_per_block - 1) / seq_len_per_block; + dim3 blocks(blocks_per_seq_len, channels, batch_size); + dim3 threads(threads_per_block, 1, 1); + + anti_alias_activation_forward + <<>>(dst, src, up_ftr, down_ftr, alpha, beta, batch_size, channels, seq_len); + } + } +} + +extern "C" torch::Tensor fwd_cuda(torch::Tensor const &input, torch::Tensor const &up_filter, torch::Tensor const &down_filter, torch::Tensor const &alpha, torch::Tensor const &beta) +{ + // Input is a 3d tensor with dimensions [batches, channels, seq_len] + const int batches = input.size(0); + const int channels = input.size(1); + const int seq_len = input.size(2); + + // Output + auto act_options = input.options().requires_grad(false); + + torch::Tensor 
anti_alias_activation_results = + torch::empty({batches, channels, seq_len}, act_options); + + void *input_ptr = static_cast(input.data_ptr()); + void *up_filter_ptr = static_cast(up_filter.data_ptr()); + void *down_filter_ptr = static_cast(down_filter.data_ptr()); + void *alpha_ptr = static_cast(alpha.data_ptr()); + void *beta_ptr = static_cast(beta.data_ptr()); + void *anti_alias_activation_results_ptr = static_cast(anti_alias_activation_results.data_ptr()); + + DISPATCH_FLOAT_HALF_AND_BFLOAT( + input.scalar_type(), + "dispatch anti alias activation_forward", + dispatch_anti_alias_activation_forward( + reinterpret_cast(anti_alias_activation_results_ptr), + reinterpret_cast(input_ptr), + reinterpret_cast(up_filter_ptr), + reinterpret_cast(down_filter_ptr), + reinterpret_cast(alpha_ptr), + reinterpret_cast(beta_ptr), + batches, + channels, + seq_len);); + return anti_alias_activation_results; +} \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h new file mode 100644 index 0000000000000000000000000000000000000000..25818b2edf4cb0dc9130e62c7c4de8d16a01baa5 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/compat.h @@ -0,0 +1,29 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/*This code is copied fron NVIDIA apex: + * https://github.com/NVIDIA/apex + * with minor changes. */ + +#ifndef TORCH_CHECK +#define TORCH_CHECK AT_CHECK +#endif + +#ifdef VERSION_GE_1_3 +#define DATA_PTR data_ptr +#else +#define DATA_PTR data +#endif diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py new file mode 100644 index 0000000000000000000000000000000000000000..ca5d01de398249e75e9e2298958764acb436edba --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/load.py @@ -0,0 +1,86 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +import os +import pathlib +import subprocess + +from torch.utils import cpp_extension + +""" +Setting this param to a list has a problem of generating different compilation commands (with diferent order of architectures) and leading to recompilation of fused kernels. +Set it to empty stringo avoid recompilation and assign arch flags explicity in extra_cuda_cflags below +""" +os.environ["TORCH_CUDA_ARCH_LIST"] = "" + + +def load(): + # Check if cuda 11 is installed for compute capability 8.0 + cc_flag = [] + _, bare_metal_major, _ = _get_cuda_bare_metal_version(cpp_extension.CUDA_HOME) + if int(bare_metal_major) >= 11: + cc_flag.append("-gencode") + cc_flag.append("arch=compute_80,code=sm_80") + + # Build path + srcpath = pathlib.Path(__file__).parent.absolute() + buildpath = srcpath / "build" + _create_build_dir(buildpath) + + # Helper function to build the kernels. 
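+    # It compiles the listed sources with -O3 and the -gencode flags assembled
+    # in cc_flag above (sm_80 is added when CUDA >= 11 is detected).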
+ def _cpp_extention_load_helper(name, sources, extra_cuda_flags): + return cpp_extension.load( + name=name, + sources=sources, + build_directory=buildpath, + extra_cflags=[ + "-O3", + ], + extra_cuda_cflags=[ + "-O3", + "-gencode", + "arch=compute_70,code=sm_70", + "--use_fast_math", + ] + + extra_cuda_flags + + cc_flag, + verbose=True, + ) + + extra_cuda_flags = [ + "-U__CUDA_NO_HALF_OPERATORS__", + "-U__CUDA_NO_HALF_CONVERSIONS__", + "--expt-relaxed-constexpr", + "--expt-extended-lambda", + ] + + sources = [ + srcpath / "anti_alias_activation.cpp", + srcpath / "anti_alias_activation_cuda.cu", + ] + anti_alias_activation_cuda = _cpp_extention_load_helper( + "anti_alias_activation_cuda", sources, extra_cuda_flags + ) + + return anti_alias_activation_cuda + + +def _get_cuda_bare_metal_version(cuda_dir): + raw_output = subprocess.check_output( + [cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True + ) + output = raw_output.split() + release_idx = output.index("release") + 1 + release = output[release_idx].split(".") + bare_metal_major = release[0] + bare_metal_minor = release[1][0] + + return raw_output, bare_metal_major, bare_metal_minor + + +def _create_build_dir(buildpath): + try: + os.mkdir(buildpath) + except OSError: + if not os.path.isdir(buildpath): + print(f"Creation of the build directory {buildpath} failed") diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h new file mode 100644 index 0000000000000000000000000000000000000000..5db7e8a397e982d4d30d16ab6060814b98b7ab83 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/cuda/type_shim.h @@ -0,0 +1,92 @@ +/* coding=utf-8 + * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "compat.h" + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT(TYPE, NAME, ...) \ + switch (TYPE) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \ + } + +#define DISPATCH_FLOAT_HALF_AND_BFLOAT_INOUT_TYPES(TYPEIN, TYPEOUT, NAME, ...) 
\ + switch (TYPEIN) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_in = float; \ + switch (TYPEOUT) \ + { \ + case at::ScalarType::Float: \ + { \ + using scalar_t_out = float; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEOUT), "'"); \ + } \ + break; \ + } \ + case at::ScalarType::Half: \ + { \ + using scalar_t_in = at::Half; \ + using scalar_t_out = at::Half; \ + __VA_ARGS__; \ + break; \ + } \ + case at::ScalarType::BFloat16: \ + { \ + using scalar_t_in = at::BFloat16; \ + using scalar_t_out = at::BFloat16; \ + __VA_ARGS__; \ + break; \ + } \ + default: \ + AT_ERROR(#NAME, " not implemented for '", toString(TYPEIN), "'"); \ + } diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8f756ed83f87f9839e457b240f60469bc187707d --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/__init__.py @@ -0,0 +1,6 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +from .filter import * +from .resample import * +from .act import * diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py new file mode 100644 index 0000000000000000000000000000000000000000..92445a8652d1998f80e2952224b18d0e1a89dc9f --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/act.py @@ -0,0 +1,32 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch.nn as nn + +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.resample import (DownSample1d, UpSample1d) + + +class Activation1d(nn.Module): + + def __init__( + self, + activation, + up_ratio: int = 2, + down_ratio: int = 2, + up_kernel_size: int = 12, + down_kernel_size: int = 12, + ): + super().__init__() + self.up_ratio = up_ratio + self.down_ratio = down_ratio + self.act = activation + self.upsample = UpSample1d(up_ratio, up_kernel_size) + self.downsample = DownSample1d(down_ratio, down_kernel_size) + + # x: [B,C,T] + def forward(self, x): + x = self.upsample(x) + x = self.act(x) + x = self.downsample(x) + + return x diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py new file mode 100644 index 0000000000000000000000000000000000000000..0fa35b0d5ddf8d6cb04cd9d47364ca033cebcd32 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/filter.py @@ -0,0 +1,101 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. + +import torch +import torch.nn as nn +import torch.nn.functional as F +import math + +if "sinc" in dir(torch): + sinc = torch.sinc +else: + # This code is adopted from adefossez's julius.core.sinc under the MIT License + # https://adefossez.github.io/julius/julius/core.html + # LICENSE is in incl_licenses directory. + def sinc(x: torch.Tensor): + """ + Implementation of sinc, i.e. 
sin(pi * x) / (pi * x) + __Warning__: Different to julius.sinc, the input is multiplied by `pi`! + """ + return torch.where( + x == 0, + torch.tensor(1.0, device=x.device, dtype=x.dtype), + torch.sin(math.pi * x) / math.pi / x, + ) + + +# This code is adopted from adefossez's julius.lowpass.LowPassFilters under the MIT License +# https://adefossez.github.io/julius/julius/lowpass.html +# LICENSE is in incl_licenses directory. +def kaiser_sinc_filter1d( + cutoff, half_width, kernel_size +): # return filter [1,1,kernel_size] + even = kernel_size % 2 == 0 + half_size = kernel_size // 2 + + # For kaiser window + delta_f = 4 * half_width + A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95 + if A > 50.0: + beta = 0.1102 * (A - 8.7) + elif A >= 21.0: + beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0) + else: + beta = 0.0 + window = torch.kaiser_window(kernel_size, beta=beta, periodic=False) + + # ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio + if even: + time = torch.arange(-half_size, half_size) + 0.5 + else: + time = torch.arange(kernel_size) - half_size + if cutoff == 0: + filter_ = torch.zeros_like(time) + else: + filter_ = 2 * cutoff * window * sinc(2 * cutoff * time) + """ + Normalize filter to have sum = 1, otherwise we will have a small leakage of the constant component in the input signal. + """ + filter_ /= filter_.sum() + filter = filter_.view(1, 1, kernel_size) + + return filter + + +class LowPassFilter1d(nn.Module): + def __init__( + self, + cutoff=0.5, + half_width=0.6, + stride: int = 1, + padding: bool = True, + padding_mode: str = "replicate", + kernel_size: int = 12, + ): + """ + kernel_size should be even number for stylegan3 setup, in this implementation, odd number is also possible. + """ + super().__init__() + if cutoff < -0.0: + raise ValueError("Minimum cutoff must be larger than zero.") + if cutoff > 0.5: + raise ValueError("A cutoff above 0.5 does not make sense.") + self.kernel_size = kernel_size + self.even = kernel_size % 2 == 0 + self.pad_left = kernel_size // 2 - int(self.even) + self.pad_right = kernel_size // 2 + self.stride = stride + self.padding = padding + self.padding_mode = padding_mode + filter = kaiser_sinc_filter1d(cutoff, half_width, kernel_size) + self.register_buffer("filter", filter) + + # Input [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + if self.padding: + x = F.pad(x, (self.pad_left, self.pad_right), mode=self.padding_mode) + out = F.conv1d(x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + + return out diff --git a/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py new file mode 100644 index 0000000000000000000000000000000000000000..33faa1518c3bcf34b63cc44374905df83542f614 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/alias_free_activation/torch/resample.py @@ -0,0 +1,54 @@ +# Adapted from https://github.com/junjun3518/alias-free-torch under the Apache License 2.0 +# LICENSE is in incl_licenses directory. 
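+#
+# UpSample1d and DownSample1d below resample [B, C, T] tensors using a
+# kaiser-windowed sinc low-pass filter (kaiser_sinc_filter1d from filter.py)
+# to suppress aliasing around the nonlinearity.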
+ +import torch.nn as nn +from torch.nn import functional as F + +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.filter import (LowPassFilter1d, + kaiser_sinc_filter1d) + + +class UpSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.stride = ratio + self.pad = self.kernel_size // ratio - 1 + self.pad_left = self.pad * self.stride + (self.kernel_size - self.stride) // 2 + self.pad_right = (self.pad * self.stride + (self.kernel_size - self.stride + 1) // 2) + filter = kaiser_sinc_filter1d(cutoff=0.5 / ratio, + half_width=0.6 / ratio, + kernel_size=self.kernel_size) + self.register_buffer("filter", filter) + + # x: [B, C, T] + def forward(self, x): + _, C, _ = x.shape + + x = F.pad(x, (self.pad, self.pad), mode="replicate") + x = self.ratio * F.conv_transpose1d( + x, self.filter.expand(C, -1, -1), stride=self.stride, groups=C) + x = x[..., self.pad_left:-self.pad_right] + + return x + + +class DownSample1d(nn.Module): + + def __init__(self, ratio=2, kernel_size=None): + super().__init__() + self.ratio = ratio + self.kernel_size = (int(6 * ratio // 2) * 2 if kernel_size is None else kernel_size) + self.lowpass = LowPassFilter1d( + cutoff=0.5 / ratio, + half_width=0.6 / ratio, + stride=ratio, + kernel_size=self.kernel_size, + ) + + def forward(self, x): + xx = self.lowpass(x) + + return xx diff --git a/mmaudio/ext/bigvgan_v2/bigvgan.py b/mmaudio/ext/bigvgan_v2/bigvgan.py new file mode 100644 index 0000000000000000000000000000000000000000..ff2b6c4c87e20d147130d0b608d2467557347caf --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/bigvgan.py @@ -0,0 +1,439 @@ +# Copyright (c) 2024 NVIDIA CORPORATION. +# Licensed under the MIT license. + +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. + +import json +import os +from pathlib import Path +from typing import Dict, Optional, Union + +import torch +import torch.nn as nn +from huggingface_hub import PyTorchModelHubMixin, hf_hub_download +from torch.nn import Conv1d, ConvTranspose1d +from torch.nn.utils.parametrizations import weight_norm +from torch.nn.utils.parametrize import remove_parametrizations + +from mmaudio.ext.bigvgan_v2 import activations +from mmaudio.ext.bigvgan_v2.alias_free_activation.torch.act import \ + Activation1d as TorchActivation1d +from mmaudio.ext.bigvgan_v2.env import AttrDict +from mmaudio.ext.bigvgan_v2.utils import get_padding, init_weights + + +def load_hparams_from_json(path) -> AttrDict: + with open(path) as f: + data = f.read() + return AttrDict(json.loads(data)) + + +class AMPBlock1(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + AMPBlock1 has additional self.convs2 that contains additional Conv1d layers with a fixed dilation=1 followed by each layer in self.convs1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
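+        Each entry in `dilation` pairs one dilated conv from `convs1` with one
+        dilation=1 conv from `convs2`, with activations interleaved between them.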
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=1, + padding=get_padding(kernel_size, 1), + )) for _ in range(len(dilation)) + ]) + self.convs2.apply(init_weights) + + self.num_layers = len(self.convs1) + len(self.convs2) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + acts1, acts2 = self.activations[::2], self.activations[1::2] + for c1, c2, a1, a2 in zip(self.convs1, self.convs2, acts1, acts2): + xt = a1(x) + xt = c1(xt) + xt = a2(xt) + xt = c2(xt) + x = xt + x + + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_parametrizations(l, 'weight') + for l in self.convs2: + remove_parametrizations(l, 'weight') + + +class AMPBlock2(torch.nn.Module): + """ + AMPBlock applies Snake / SnakeBeta activation functions with trainable parameters that control periodicity, defined for each layer. + Unlike AMPBlock1, AMPBlock2 does not contain extra Conv1d layers with fixed dilation=1 + + Args: + h (AttrDict): Hyperparameters. + channels (int): Number of convolution channels. + kernel_size (int): Size of the convolution kernel. Default is 3. + dilation (tuple): Dilation rates for the convolutions. Each dilation layer has two convolutions. Default is (1, 3, 5). + activation (str): Activation function type. Should be either 'snake' or 'snakebeta'. Default is None. 
+ """ + + def __init__( + self, + h: AttrDict, + channels: int, + kernel_size: int = 3, + dilation: tuple = (1, 3, 5), + activation: str = None, + ): + super().__init__() + + self.h = h + + self.convs = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + stride=1, + dilation=d, + padding=get_padding(kernel_size, d), + )) for d in dilation + ]) + self.convs.apply(init_weights) + + self.num_layers = len(self.convs) # Total number of conv layers + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + # Activation functions + if activation == "snake": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.Snake(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + elif activation == "snakebeta": + self.activations = nn.ModuleList([ + Activation1d( + activation=activations.SnakeBeta(channels, alpha_logscale=h.snake_logscale)) + for _ in range(self.num_layers) + ]) + else: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + def forward(self, x): + for c, a in zip(self.convs, self.activations): + xt = a(x) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class BigVGAN( + torch.nn.Module, + PyTorchModelHubMixin, + library_name="bigvgan", + repo_url="https://github.com/NVIDIA/BigVGAN", + docs_url="https://github.com/NVIDIA/BigVGAN/blob/main/README.md", + pipeline_tag="audio-to-audio", + license="mit", + tags=["neural-vocoder", "audio-generation", "arxiv:2206.04658"], +): + """ + BigVGAN is a neural vocoder model that applies anti-aliased periodic activation for residual blocks (resblocks). + New in BigVGAN-v2: it can optionally use optimized CUDA kernels for AMP (anti-aliased multi-periodicity) blocks. + + Args: + h (AttrDict): Hyperparameters. + use_cuda_kernel (bool): If set to True, loads optimized CUDA kernels for AMP. This should be used for inference only, as training is not supported with CUDA kernels. + + Note: + - The `use_cuda_kernel` parameter should be used for inference only, as training with CUDA kernels is not supported. + - Ensure that the activation function is correctly specified in the hyperparameters (h.activation). + """ + + def __init__(self, h: AttrDict, use_cuda_kernel: bool = False): + super().__init__() + self.h = h + self.h["use_cuda_kernel"] = use_cuda_kernel + + # Select which Activation1d, lazy-load cuda version to ensure backward compatibility + if self.h.get("use_cuda_kernel", False): + from alias_free_activation.cuda.activation1d import \ + Activation1d as CudaActivation1d + + Activation1d = CudaActivation1d + else: + Activation1d = TorchActivation1d + + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + + # Pre-conv + self.conv_pre = weight_norm(Conv1d(h.num_mels, h.upsample_initial_channel, 7, 1, padding=3)) + + # Define which AMPBlock to use. BigVGAN uses AMPBlock1 as default + if h.resblock == "1": + resblock_class = AMPBlock1 + elif h.resblock == "2": + resblock_class = AMPBlock2 + else: + raise ValueError( + f"Incorrect resblock class specified in hyperparameters. Got {h.resblock}") + + # Transposed conv-based upsamplers. 
does not apply anti-aliasing + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + nn.ModuleList([ + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + padding=(k - u) // 2, + )) + ])) + + # Residual blocks using anti-aliased multi-periodicity composition modules (AMP) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock_class(h, ch, k, d, activation=h.activation)) + + # Post-conv + activation_post = (activations.Snake(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snake" else + (activations.SnakeBeta(ch, alpha_logscale=h.snake_logscale) + if h.activation == "snakebeta" else None)) + if activation_post is None: + raise NotImplementedError( + "activation incorrectly specified. check the config file and look for 'activation'." + ) + + self.activation_post = Activation1d(activation=activation_post) + + # Whether to use bias for the final conv_post. Default to True for backward compatibility + self.use_bias_at_final = h.get("use_bias_at_final", True) + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3, bias=self.use_bias_at_final)) + + # Weight initialization + for i in range(len(self.ups)): + self.ups[i].apply(init_weights) + self.conv_post.apply(init_weights) + + # Final tanh activation. Defaults to True for backward compatibility + self.use_tanh_at_final = h.get("use_tanh_at_final", True) + + def forward(self, x): + # Pre-conv + x = self.conv_pre(x) + + for i in range(self.num_upsamples): + # Upsampling + for i_up in range(len(self.ups[i])): + x = self.ups[i][i_up](x) + # AMP blocks + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + # Post-conv + x = self.activation_post(x) + x = self.conv_post(x) + # Final tanh activation + if self.use_tanh_at_final: + x = torch.tanh(x) + else: + x = torch.clamp(x, min=-1.0, max=1.0) # Bound the output to [-1, 1] + + return x + + def remove_weight_norm(self): + try: + print("Removing weight norm...") + for l in self.ups: + for l_i in l: + remove_parametrizations(l_i, 'weight') + for l in self.resblocks: + l.remove_weight_norm() + remove_parametrizations(self.conv_pre, 'weight') + remove_parametrizations(self.conv_post, 'weight') + except ValueError: + print("[INFO] Model already removed weight norm. 
Skipping!") + pass + + # Additional methods for huggingface_hub support + def _save_pretrained(self, save_directory: Path) -> None: + """Save weights and config.json from a Pytorch model to a local directory.""" + + model_path = save_directory / "bigvgan_generator.pt" + torch.save({"generator": self.state_dict()}, model_path) + + config_path = save_directory / "config.json" + with open(config_path, "w") as config_file: + json.dump(self.h, config_file, indent=4) + + @classmethod + def _from_pretrained( + cls, + *, + model_id: str, + revision: str, + cache_dir: str, + force_download: bool, + proxies: Optional[Dict], + resume_download: bool, + local_files_only: bool, + token: Union[str, bool, None], + map_location: str = "cpu", # Additional argument + strict: bool = False, # Additional argument + use_cuda_kernel: bool = False, + **model_kwargs, + ): + """Load Pytorch pretrained weights and return the loaded model.""" + + # Download and load hyperparameters (h) used by BigVGAN + if os.path.isdir(model_id): + print("Loading config.json from local directory") + config_file = os.path.join(model_id, "config.json") + else: + config_file = hf_hub_download( + repo_id=model_id, + filename="config.json", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + h = load_hparams_from_json(config_file) + + # instantiate BigVGAN using h + if use_cuda_kernel: + print( + f"[WARNING] You have specified use_cuda_kernel=True during BigVGAN.from_pretrained(). Only inference is supported (training is not implemented)!" + ) + print( + f"[WARNING] You need nvcc and ninja installed in your system that matches your PyTorch build is using to build the kernel. If not, the model will fail to initialize or generate incorrect waveform!" + ) + print( + f"[WARNING] For detail, see the official GitHub repository: https://github.com/NVIDIA/BigVGAN?tab=readme-ov-file#using-custom-cuda-kernel-for-synthesis" + ) + model = cls(h, use_cuda_kernel=use_cuda_kernel) + + # Download and load pretrained generator weight + if os.path.isdir(model_id): + print("Loading weights from local directory") + model_file = os.path.join(model_id, "bigvgan_generator.pt") + else: + print(f"Loading weights from {model_id}") + model_file = hf_hub_download( + repo_id=model_id, + filename="bigvgan_generator.pt", + revision=revision, + cache_dir=cache_dir, + force_download=force_download, + proxies=proxies, + resume_download=resume_download, + token=token, + local_files_only=local_files_only, + ) + + checkpoint_dict = torch.load(model_file, map_location=map_location, weights_only=True) + + try: + model.load_state_dict(checkpoint_dict["generator"]) + except RuntimeError: + print( + f"[INFO] the pretrained checkpoint does not contain weight norm. Loading the checkpoint after removing weight norm!" + ) + model.remove_weight_norm() + model.load_state_dict(checkpoint_dict["generator"]) + + return model diff --git a/mmaudio/ext/bigvgan_v2/env.py b/mmaudio/ext/bigvgan_v2/env.py new file mode 100644 index 0000000000000000000000000000000000000000..b8be238d4db710c8c9a338d336baea0138f18d1f --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/env.py @@ -0,0 +1,18 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
+ +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 new file mode 100644 index 0000000000000000000000000000000000000000..5afae394d6b37da0e12ba6b290d2512687f421ac --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_1 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 new file mode 100644 index 0000000000000000000000000000000000000000..322b758863c4219be68291ae3826218baa93cb4c --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_2 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Edward Dixon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 new file mode 100644 index 0000000000000000000000000000000000000000..56ee3c8c4cc2b4b32e0975d17258f9ba515fdbcc --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_3 @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 new file mode 100644 index 0000000000000000000000000000000000000000..48fd1a1ba8d81a94b6c7d1c2ff1a1f307cc5371d --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_4 @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) 2019, Seungwon Park 박승원 +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
\ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 new file mode 100644 index 0000000000000000000000000000000000000000..01ae5538e6b7c787bb4f5d6f2cd9903520d6e465 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_5 @@ -0,0 +1,16 @@ +Copyright 2020 Alexandre Défossez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and +associated documentation files (the "Software"), to deal in the Software without restriction, +including without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT +NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, +DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 new file mode 100644 index 0000000000000000000000000000000000000000..2569ec0b6c85f94f3cd071ba16e9028ccf156be2 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_6 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023-present, Descript + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 new file mode 100644 index 0000000000000000000000000000000000000000..c37bdaf99c6921f5849425d546069e972f52d7fa --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_7 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Charactr Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 new file mode 100644 index 0000000000000000000000000000000000000000..ab3d7ffe795779f54e339078e4e752ad9019aae8 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/incl_licenses/LICENSE_8 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Amphion + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/mmaudio/ext/bigvgan_v2/utils.py b/mmaudio/ext/bigvgan_v2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3b1d41670fa1ee257b2ed22c61086ba7a32c7cb0 --- /dev/null +++ b/mmaudio/ext/bigvgan_v2/utils.py @@ -0,0 +1,31 @@ +# Adapted from https://github.com/jik876/hifi-gan under the MIT license. +# LICENSE is in incl_licenses directory. 
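+# Small helpers shared by the BigVGAN / HiFi-GAN modules: Conv weight initialization,
+# applying weight normalization to Conv layers, "same" padding for dilated convolutions,
+# and a simple checkpoint loader.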
+ +import os + +import torch +from torch.nn.utils import weight_norm + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict diff --git a/mmaudio/ext/mel_converter.py b/mmaudio/ext/mel_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..6fc589c9468e077fc580965db250fd502e229672 --- /dev/null +++ b/mmaudio/ext/mel_converter.py @@ -0,0 +1,82 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class MelConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 80, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log10, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.mel_basis.device + + def forward(self, waveform: torch.Tensor, center: bool = False) -> torch.Tensor: + waveform = waveform.clamp(min=-1., max=1.).to(self.device) + + waveform = torch.nn.functional.pad( + waveform.unsqueeze(1), + [int((self.n_fft - self.hop_size) / 2), + int((self.n_fft - self.hop_size) / 2)], + mode='reflect') + waveform = waveform.squeeze(1) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=center, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + spec = torch.matmul(self.mel_basis, spec) + spec = spectral_normalize_torch(spec, self.norm_fn) + + return spec diff --git a/mmaudio/ext/rotary_embeddings.py b/mmaudio/ext/rotary_embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..1ea9d56278cb68b7577ed13148227c30ed98fd02 --- /dev/null +++ b/mmaudio/ext/rotary_embeddings.py @@ -0,0 +1,35 @@ +from typing import Union + +import torch +from einops import rearrange +from torch import Tensor + +# Ref: https://github.com/black-forest-labs/flux/blob/main/src/flux/math.py +# Ref: https://github.com/lucidrains/rotary-embedding-torch + + +def compute_rope_rotations(length: int, + dim: int, + theta: int, + 
*, + freq_scaling: float = 1.0, + device: Union[torch.device, str] = 'cpu') -> Tensor: + assert dim % 2 == 0 + + with torch.amp.autocast(device_type='cuda', enabled=False): + pos = torch.arange(length, dtype=torch.float32, device=device) + freqs = 1.0 / (theta**(torch.arange(0, dim, 2, dtype=torch.float32, device=device) / dim)) + freqs *= freq_scaling + + rot = torch.einsum('..., f -> ... f', pos, freqs) + rot = torch.stack([torch.cos(rot), -torch.sin(rot), torch.sin(rot), torch.cos(rot)], dim=-1) + rot = rearrange(rot, 'n d (i j) -> 1 n d i j', i=2, j=2) + return rot + + +def apply_rope(x: Tensor, rot: Tensor) -> tuple[Tensor, Tensor]: + with torch.amp.autocast(device_type='cuda', enabled=False): + _x = x.float() + _x = _x.view(*_x.shape[:-1], -1, 1, 2) + x_out = rot[..., 0] * _x[..., 0] + rot[..., 1] * _x[..., 1] + return x_out.reshape(*x.shape).to(dtype=x.dtype) diff --git a/mmaudio/ext/stft_converter.py b/mmaudio/ext/stft_converter.py new file mode 100644 index 0000000000000000000000000000000000000000..62922067ef3b1d3b8727ec39e7d664ccb304d9fe --- /dev/null +++ b/mmaudio/ext/stft_converter.py @@ -0,0 +1,183 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) 
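+        # The clamped waveform is passed through a centered STFT below; with onesided=True
+        # the complex spectrogram has shape (batch, n_fft // 2 + 1, num_frames), and
+        # view_as_real() then splits it into real/imaginary parts so that power and phase
+        # can be derived separately further down.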
+ + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = spec.pow(2).sum(-1) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean()) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = rearrange(spec, 'b f t c -> (b c) f t') + + # spec = self.mel_transform(spec) + + # spec = torch.matmul(self.mel_basis, spec) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-5)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + spec = torch.stack([power, angle], dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return spec + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + bs = spec.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(spec[..., 1]), + torch.sin(spec[..., 1]), + ], dim=-1) + + spec = torch.sqrt(power) * unit_vector + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/mmaudio/ext/stft_converter_mel.py b/mmaudio/ext/stft_converter_mel.py new 
file mode 100644 index 0000000000000000000000000000000000000000..f6b32d4cb9a23cd74f723e7d8307fd82fa1abba0 --- /dev/null +++ b/mmaudio/ext/stft_converter_mel.py @@ -0,0 +1,234 @@ +# Reference: # https://github.com/bytedance/Make-An-Audio-2 + +import torch +import torch.nn as nn +import torchaudio +from einops import rearrange +from librosa.filters import mel as librosa_mel_fn + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5, norm_fn=torch.log10): + return norm_fn(torch.clamp(x, min=clip_val) * C) + + +def spectral_normalize_torch(magnitudes, norm_fn): + output = dynamic_range_compression_torch(magnitudes, norm_fn=norm_fn) + return output + + +class STFTConverter(nn.Module): + + def __init__( + self, + *, + sampling_rate: float = 16_000, + n_fft: int = 1024, + num_mels: int = 128, + hop_size: int = 256, + win_size: int = 1024, + fmin: float = 0, + fmax: float = 8_000, + norm_fn=torch.log, + ): + super().__init__() + self.sampling_rate = sampling_rate + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.norm_fn = norm_fn + + mel = librosa_mel_fn(sr=self.sampling_rate, + n_fft=self.n_fft, + n_mels=self.num_mels, + fmin=self.fmin, + fmax=self.fmax) + mel_basis = torch.from_numpy(mel).float() + hann_window = torch.hann_window(self.win_size) + + self.register_buffer('mel_basis', mel_basis) + self.register_buffer('hann_window', hann_window) + + @property + def device(self): + return self.hann_window.device + + def forward(self, waveform: torch.Tensor) -> torch.Tensor: + # input: batch_size * length + bs = waveform.shape[0] + waveform = waveform.clamp(min=-1., max=1.) + + spec = torch.stft(waveform, + self.n_fft, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + pad_mode='reflect', + normalized=False, + onesided=True, + return_complex=True) + + spec = torch.view_as_real(spec) + # print('After stft', spec.shape, spec.min(), spec.max(), spec.mean()) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 1', power.shape, power.min(), power.max(), power.mean()) + print('angle 1', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('mel', self.mel_basis.shape, self.mel_basis.min(), self.mel_basis.max(), + # self.mel_basis.mean()) + + # spec = self.mel_transform(spec) + + # power = torch.matmul(self.mel_basis, power) + + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = self.mel_basis.unsqueeze(0) @ spec + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs) + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power', power.shape, power.min(), power.max(), power.mean()) + print('angle', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # print('After mel', spec.shape, spec.min(), spec.max(), spec.mean()) + + # spec = spectral_normalize_torch(spec, self.norm_fn) + + # print('After norm', spec.shape, spec.min(), spec.max(), spec.mean()) + + # compute magnitude + # magnitude = torch.sqrt((spec**2).sum(-1)) + # normalize by magnitude + # scaled_magnitude = torch.log10(magnitude.clamp(min=1e-5)) * 10 + # spec = spec / magnitude.unsqueeze(-1) * scaled_magnitude.unsqueeze(-1) + + # power = torch.log10(power.clamp(min=1e-5)) * 10 + power = torch.log10(power.clamp(min=1e-8)) + + print('After scaling', power.shape, power.min(), power.max(), power.mean()) + + # spec = torch.stack([power, angle], 
dim=-1) + + # spec = rearrange(spec, '(b c) f t -> b c f t', b=bs) + # spec = rearrange(spec, 'b f t c -> b c f t', b=bs) + + # spec[:, :, 400:] = 0 + + return power, angle + # return spec[..., 0], spec[..., 1] + + def invert(self, spec: torch.Tensor, length: int) -> torch.Tensor: + + power, angle = spec + + bs = power.shape[0] + + # spec = rearrange(spec, 'b c f t -> (b c) f t') + # print(spec.shape, self.mel_basis.shape) + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + # spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + + # spec = self.invmel_transform(spec) + + # spec = rearrange(spec, 'b c f t -> b f t c', b=bs).contiguous() + + # spec[..., 0] = 10**(spec[..., 0] / 10) + + # power = spec[..., 0] + power = 10**power + + # print('After unscaling', spec[..., 0].shape, spec[..., 0].min(), spec[..., 0].max(), + # spec[..., 0].mean()) + + unit_vector = torch.stack([ + torch.cos(angle), + torch.sin(angle), + ], dim=-1) + + spec = power.unsqueeze(-1) * unit_vector + + # power = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), power).solution + spec = rearrange(spec, 'b f t c -> (b c) f t') + spec = torch.linalg.pinv(self.mel_basis.unsqueeze(0)) @ spec + # spec = torch.linalg.lstsq(self.mel_basis.unsqueeze(0), spec).solution + spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + + power = (spec.pow(2).sum(-1))**(0.5) + angle = torch.atan2(spec[..., 1], spec[..., 0]) + + print('power 2', power.shape, power.min(), power.max(), power.mean()) + print('angle 2', angle.shape, angle.min(), angle.max(), angle.mean(), angle[:, :2, :2]) + + # spec = rearrange(spec, '(b c) f t -> b f t c', b=bs).contiguous() + spec = torch.view_as_complex(spec) + + waveform = torch.istft( + spec, + self.n_fft, + length=length, + hop_length=self.hop_size, + win_length=self.win_size, + window=self.hann_window, + center=True, + normalized=False, + onesided=True, + return_complex=False, + ) + + return waveform + + +if __name__ == '__main__': + + converter = STFTConverter(sampling_rate=16000) + + signal = torchaudio.load('./output/ZZ6GRocWW38_000090.wav')[0] + # resample signal at 44100 Hz + # signal = torchaudio.transforms.Resample(16_000, 44_100)(signal) + + L = signal.shape[1] + print('Input signal', signal.shape) + spec = converter(signal) + + power, angle = spec + + # print(power.shape, angle.shape) + # print(power, power.min(), power.max(), power.mean()) + # power = power.clamp(-1, 1) + # angle = angle.clamp(-1, 1) + + import matplotlib.pyplot as plt + + # Visualize power + plt.figure() + plt.imshow(power[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Power') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/power.png') + + # Visualize angle + plt.figure() + plt.imshow(angle[0].detach().numpy(), aspect='auto', origin='lower') + plt.colorbar() + plt.title('Angle') + plt.xlabel('Time') + plt.ylabel('Frequency') + plt.savefig('./output/angle.png') + + # print('Final spec', spec.shape) + + signal_recon = converter.invert(spec, length=L) + print('Output signal', signal_recon.shape, signal_recon.min(), signal_recon.max(), + signal_recon.mean()) + + print('MSE', torch.nn.functional.mse_loss(signal, signal_recon)) + torchaudio.save('./output/ZZ6GRocWW38_000090_recon.wav', signal_recon, 16000) diff --git a/mmaudio/ext/synchformer/LICENSE b/mmaudio/ext/synchformer/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2f70bf24b6f45f458998bdf5746376c4832352ea --- /dev/null +++ 
b/mmaudio/ext/synchformer/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Vladimir Iashin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/mmaudio/ext/synchformer/__init__.py b/mmaudio/ext/synchformer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3aa1c4b6464593722e557505d721f3ca5e05f4e8 --- /dev/null +++ b/mmaudio/ext/synchformer/__init__.py @@ -0,0 +1 @@ +from mmaudio.ext.synchformer.synchformer import Synchformer diff --git a/mmaudio/ext/synchformer/divided_224_16x4.yaml b/mmaudio/ext/synchformer/divided_224_16x4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f9d20b76302a8af7928391643bd4b2d184e970aa --- /dev/null +++ b/mmaudio/ext/synchformer/divided_224_16x4.yaml @@ -0,0 +1,84 @@ +TRAIN: + ENABLE: True + DATASET: Ssv2 + BATCH_SIZE: 32 + EVAL_PERIOD: 5 + CHECKPOINT_PERIOD: 5 + AUTO_RESUME: True + CHECKPOINT_EPOCH_RESET: True + CHECKPOINT_FILE_PATH: /checkpoint/fmetze/neurips_sota/40944587/checkpoints/checkpoint_epoch_00035.pyth +DATA: + NUM_FRAMES: 16 + SAMPLING_RATE: 4 + TRAIN_JITTER_SCALES: [256, 320] + TRAIN_CROP_SIZE: 224 + TEST_CROP_SIZE: 224 + INPUT_CHANNEL_NUM: [3] + MEAN: [0.5, 0.5, 0.5] + STD: [0.5, 0.5, 0.5] + PATH_TO_DATA_DIR: /private/home/mandelapatrick/slowfast/data/ssv2 + PATH_PREFIX: /datasets01/SomethingV2/092720/20bn-something-something-v2-frames + INV_UNIFORM_SAMPLE: True + RANDOM_FLIP: False + REVERSE_INPUT_CHANNEL: True + USE_RAND_AUGMENT: True + RE_PROB: 0.0 + USE_REPEATED_AUG: False + USE_RANDOM_RESIZE_CROPS: False + COLORJITTER: False + GRAYSCALE: False + GAUSSIAN: False +SOLVER: + BASE_LR: 1e-4 + LR_POLICY: steps_with_relative_lrs + LRS: [1, 0.1, 0.01] + STEPS: [0, 20, 30] + MAX_EPOCH: 35 + MOMENTUM: 0.9 + WEIGHT_DECAY: 5e-2 + WARMUP_EPOCHS: 0.0 + OPTIMIZING_METHOD: adamw + USE_MIXED_PRECISION: True + SMOOTHING: 0.2 +SLOWFAST: + ALPHA: 8 +VIT: + PATCH_SIZE: 16 + PATCH_SIZE_TEMP: 2 + CHANNELS: 3 + EMBED_DIM: 768 + DEPTH: 12 + NUM_HEADS: 12 + MLP_RATIO: 4 + QKV_BIAS: True + VIDEO_INPUT: True + TEMPORAL_RESOLUTION: 8 + USE_MLP: True + DROP: 0.0 + POS_DROPOUT: 0.0 + DROP_PATH: 0.2 + IM_PRETRAINED: True + HEAD_DROPOUT: 0.0 + HEAD_ACT: tanh + PRETRAINED_WEIGHTS: vit_1k + ATTN_LAYER: divided +MODEL: + NUM_CLASSES: 174 + ARCH: slow + MODEL_NAME: VisionTransformer + LOSS_FUNC: cross_entropy +TEST: + ENABLE: True + DATASET: Ssv2 + BATCH_SIZE: 64 + NUM_ENSEMBLE_VIEWS: 1 + NUM_SPATIAL_CROPS: 3 +DATA_LOADER: + NUM_WORKERS: 4 + PIN_MEMORY: True +NUM_GPUS: 8 +NUM_SHARDS: 4 +RNG_SEED: 0 +OUTPUT_DIR: . 
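+# NOTE: the settings above are kept verbatim from the upstream Motionformer SSV2 recipe
+# (including author-specific checkpoint and dataset paths); in this repository the file
+# appears to be used only to configure the VisionTransformer backbone for feature extraction.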
+TENSORBOARD: + ENABLE: True diff --git a/mmaudio/ext/synchformer/motionformer.py b/mmaudio/ext/synchformer/motionformer.py new file mode 100644 index 0000000000000000000000000000000000000000..f02141e7cf3a3a133553b6a25341b4b68a483de4 --- /dev/null +++ b/mmaudio/ext/synchformer/motionformer.py @@ -0,0 +1,400 @@ +import logging +from pathlib import Path + +import einops +import torch +from omegaconf import OmegaConf +from timm.layers import trunc_normal_ +from torch import nn + +from mmaudio.ext.synchformer.utils import check_if_file_exists_else_download +from mmaudio.ext.synchformer.video_model_builder import VisionTransformer + +FILE2URL = { + # cfg + 'motionformer_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/motionformer_224_16x4.yaml', + 'joint_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/joint_224_16x4.yaml', + 'divided_224_16x4.yaml': + 'https://raw.githubusercontent.com/facebookresearch/Motionformer/bf43d50/configs/SSV2/divided_224_16x4.yaml', + # ckpt + 'ssv2_motionformer_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_motionformer_224_16x4.pyth', + 'ssv2_joint_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_joint_224_16x4.pyth', + 'ssv2_divided_224_16x4.pyth': + 'https://dl.fbaipublicfiles.com/motionformer/ssv2_divided_224_16x4.pyth', +} + + +class MotionFormer(VisionTransformer): + ''' This class serves three puposes: + 1. Renames the class to MotionFormer. + 2. Downloads the cfg from the original repo and patches it if needed. + 3. Takes care of feature extraction by redefining .forward() + - if `extract_features=True` and `factorize_space_time=False`, + the output is of shape (B, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + - if `extract_features=True` and `factorize_space_time=True`, the output is of shape (B*S, D) + and spatial and temporal transformer encoder layers are used. + - if `extract_features=True` and `factorize_space_time=True` as well as `add_global_repr=True` + the output is of shape (B, D) and spatial and temporal transformer encoder layers + are used as well as the global representation is extracted from segments (extra pos emb + is added). 
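+        In this repository, Synchformer instantiates this extractor with extract_features=True,
+        factorize_space_time=True and add_global_repr=False, so a batch of segmented clips
+        yields frame-level features of shape (B, S, tv, D), e.g. (B, 7, 8, 768).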
+ ''' + + def __init__( + self, + extract_features: bool = False, + ckpt_path: str = None, + factorize_space_time: bool = None, + agg_space_module: str = None, + agg_time_module: str = None, + add_global_repr: bool = True, + agg_segments_module: str = None, + max_segments: int = None, + ): + self.extract_features = extract_features + self.ckpt_path = ckpt_path + self.factorize_space_time = factorize_space_time + + if self.ckpt_path is not None: + check_if_file_exists_else_download(self.ckpt_path, FILE2URL) + ckpt = torch.load(self.ckpt_path, map_location='cpu') + mformer_ckpt2cfg = { + 'ssv2_motionformer_224_16x4.pyth': 'motionformer_224_16x4.yaml', + 'ssv2_joint_224_16x4.pyth': 'joint_224_16x4.yaml', + 'ssv2_divided_224_16x4.pyth': 'divided_224_16x4.yaml', + } + # init from motionformer ckpt or from our Stage I ckpt + # depending on whether the feat extractor was pre-trained on AVCLIPMoCo or not, we need to + # load the state dict differently + was_pt_on_avclip = self.ckpt_path.endswith( + '.pt') # checks if it is a stage I ckpt (FIXME: a bit generic) + if self.ckpt_path.endswith(tuple(mformer_ckpt2cfg.keys())): + cfg_fname = mformer_ckpt2cfg[Path(self.ckpt_path).name] + elif was_pt_on_avclip: + # TODO: this is a hack, we should be able to get the cfg from the ckpt (earlier ckpt didn't have it) + s1_cfg = ckpt.get('args', None) # Stage I cfg + if s1_cfg is not None: + s1_vfeat_extractor_ckpt_path = s1_cfg.model.params.vfeat_extractor.params.ckpt_path + # if the stage I ckpt was initialized from a motionformer ckpt or train from scratch + if s1_vfeat_extractor_ckpt_path is not None: + cfg_fname = mformer_ckpt2cfg[Path(s1_vfeat_extractor_ckpt_path).name] + else: + cfg_fname = 'divided_224_16x4.yaml' + else: + cfg_fname = 'divided_224_16x4.yaml' + else: + raise ValueError(f'ckpt_path {self.ckpt_path} is not supported.') + else: + was_pt_on_avclip = False + cfg_fname = 'divided_224_16x4.yaml' + # logging.info(f'No ckpt_path provided, using {cfg_fname} config.') + + if cfg_fname in ['motionformer_224_16x4.yaml', 'divided_224_16x4.yaml']: + pos_emb_type = 'separate' + elif cfg_fname == 'joint_224_16x4.yaml': + pos_emb_type = 'joint' + + self.mformer_cfg_path = Path(__file__).absolute().parent / cfg_fname + + check_if_file_exists_else_download(self.mformer_cfg_path, FILE2URL) + mformer_cfg = OmegaConf.load(self.mformer_cfg_path) + logging.info(f'Loading MotionFormer config from {self.mformer_cfg_path.absolute()}') + + # patch the cfg (from the default cfg defined in the repo `Motionformer/slowfast/config/defaults.py`) + mformer_cfg.VIT.ATTN_DROPOUT = 0.0 + mformer_cfg.VIT.POS_EMBED = pos_emb_type + mformer_cfg.VIT.USE_ORIGINAL_TRAJ_ATTN_CODE = True + mformer_cfg.VIT.APPROX_ATTN_TYPE = 'none' # guessing + mformer_cfg.VIT.APPROX_ATTN_DIM = 64 # from ckpt['cfg'] + + # finally init VisionTransformer with the cfg + super().__init__(mformer_cfg) + + # load the ckpt now if ckpt is provided and not from AVCLIPMoCo-pretrained ckpt + if (self.ckpt_path is not None) and (not was_pt_on_avclip): + _ckpt_load_status = self.load_state_dict(ckpt['model_state'], strict=False) + if len(_ckpt_load_status.missing_keys) > 0 or len( + _ckpt_load_status.unexpected_keys) > 0: + logging.warning(f'Loading exact vfeat_extractor ckpt from {self.ckpt_path} failed.' 
\ + f'Missing keys: {_ckpt_load_status.missing_keys}, ' \ + f'Unexpected keys: {_ckpt_load_status.unexpected_keys}') + else: + logging.info(f'Loading vfeat_extractor ckpt from {self.ckpt_path} succeeded.') + + if self.extract_features: + assert isinstance(self.norm, + nn.LayerNorm), 'early x[:, 1:, :] may not be safe for per-tr weights' + # pre-logits are Sequential(nn.Linear(emb, emd), act) and `act` is tanh but see the logger + self.pre_logits = nn.Identity() + # we don't need the classification head (saving memory) + self.head = nn.Identity() + self.head_drop = nn.Identity() + # avoiding code duplication (used only if agg_*_module is TransformerEncoderLayer) + transf_enc_layer_kwargs = dict( + d_model=self.embed_dim, + nhead=self.num_heads, + activation=nn.GELU(), + batch_first=True, + dim_feedforward=self.mlp_ratio * self.embed_dim, + dropout=self.drop_rate, + layer_norm_eps=1e-6, + norm_first=True, + ) + # define adapters if needed + if self.factorize_space_time: + if agg_space_module == 'TransformerEncoderLayer': + self.spatial_attn_agg = SpatialTransformerEncoderLayer( + **transf_enc_layer_kwargs) + elif agg_space_module == 'AveragePooling': + self.spatial_attn_agg = AveragePooling(avg_pattern='BS D t h w -> BS D t', + then_permute_pattern='BS D t -> BS t D') + if agg_time_module == 'TransformerEncoderLayer': + self.temp_attn_agg = TemporalTransformerEncoderLayer(**transf_enc_layer_kwargs) + elif agg_time_module == 'AveragePooling': + self.temp_attn_agg = AveragePooling(avg_pattern='BS t D -> BS D') + elif 'Identity' in agg_time_module: + self.temp_attn_agg = nn.Identity() + # define a global aggregation layer (aggregarate over segments) + self.add_global_repr = add_global_repr + if add_global_repr: + if agg_segments_module == 'TransformerEncoderLayer': + # we can reuse the same layer as for temporal factorization (B, dim_to_agg, D) -> (B, D) + # we need to add pos emb (PE) because previously we added the same PE for each segment + pos_max_len = max_segments if max_segments is not None else 16 # 16 = 10sec//0.64sec + 1 + self.global_attn_agg = TemporalTransformerEncoderLayer( + add_pos_emb=True, + pos_emb_drop=mformer_cfg.VIT.POS_DROPOUT, + pos_max_len=pos_max_len, + **transf_enc_layer_kwargs) + elif agg_segments_module == 'AveragePooling': + self.global_attn_agg = AveragePooling(avg_pattern='B S D -> B D') + + if was_pt_on_avclip: + # we need to filter out the state_dict of the AVCLIP model (has both A and V extractors) + # and keep only the state_dict of the feat extractor + ckpt_weights = dict() + for k, v in ckpt['state_dict'].items(): + if k.startswith(('module.v_encoder.', 'v_encoder.')): + k = k.replace('module.', '').replace('v_encoder.', '') + ckpt_weights[k] = v + _load_status = self.load_state_dict(ckpt_weights, strict=False) + if len(_load_status.missing_keys) > 0 or len(_load_status.unexpected_keys) > 0: + logging.warning(f'Loading exact vfeat_extractor ckpt from {self.ckpt_path} failed. 
\n' \ + f'Missing keys ({len(_load_status.missing_keys)}): ' \ + f'{_load_status.missing_keys}, \n' \ + f'Unexpected keys ({len(_load_status.unexpected_keys)}): ' \ + f'{_load_status.unexpected_keys} \n' \ + f'temp_attn_agg are expected to be missing if ckpt was pt contrastively.') + else: + logging.info(f'Loading vfeat_extractor ckpt from {self.ckpt_path} succeeded.') + + # patch_embed is not used in MotionFormer, only patch_embed_3d, because cfg.VIT.PATCH_SIZE_TEMP > 1 + # but it used to calculate the number of patches, so we need to set keep it + self.patch_embed.requires_grad_(False) + + def forward(self, x): + ''' + x is of shape (B, S, C, T, H, W) where S is the number of segments. + ''' + # Batch, Segments, Channels, T=frames, Height, Width + B, S, C, T, H, W = x.shape + # Motionformer expects a tensor of shape (1, B, C, T, H, W). + # The first dimension (1) is a dummy dimension to make the input tensor and won't be used: + # see `video_model_builder.video_input`. + # x = x.unsqueeze(0) # (1, B, S, C, T, H, W) + + orig_shape = (B, S, C, T, H, W) + x = x.view(B * S, C, T, H, W) # flatten batch and segments + x = self.forward_segments(x, orig_shape=orig_shape) + # unpack the segments (using rest dimensions to support different shapes e.g. (BS, D) or (BS, t, D)) + x = x.view(B, S, *x.shape[1:]) + # x is now of shape (B*S, D) or (B*S, t, D) if `self.temp_attn_agg` is `Identity` + + return x # x is (B, S, ...) + + def forward_segments(self, x, orig_shape: tuple) -> torch.Tensor: + '''x is of shape (1, BS, C, T, H, W) where S is the number of segments.''' + x, x_mask = self.forward_features(x) + + assert self.extract_features + + # (BS, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + x = x[:, + 1:, :] # without the CLS token for efficiency (should be safe for LayerNorm and FC) + x = self.norm(x) + x = self.pre_logits(x) + if self.factorize_space_time: + x = self.restore_spatio_temp_dims(x, orig_shape) # (B*S, D, t, h, w) <- (B*S, t*h*w, D) + + x = self.spatial_attn_agg(x, x_mask) # (B*S, t, D) + x = self.temp_attn_agg( + x) # (B*S, D) or (BS, t, D) if `self.temp_attn_agg` is `Identity` + + return x + + def restore_spatio_temp_dims(self, feats: torch.Tensor, orig_shape: tuple) -> torch.Tensor: + ''' + feats are of shape (B*S, T, D) where T = 1 + (224 // 16) * (224 // 16) * 8 + Our goal is to make them of shape (B*S, t, h, w, D) where h, w are the spatial dimensions. + From `self.patch_embed_3d`, it follows that we could reshape feats with: + `feats.transpose(1, 2).view(B*S, D, t, h, w)` + ''' + B, S, C, T, H, W = orig_shape + D = self.embed_dim + + # num patches in each dimension + t = T // self.patch_embed_3d.z_block_size + h = self.patch_embed_3d.height + w = self.patch_embed_3d.width + + feats = feats.permute(0, 2, 1) # (B*S, D, T) + feats = feats.view(B * S, D, t, h, w) # (B*S, D, t, h, w) + + return feats + + +class BaseEncoderLayer(nn.TransformerEncoderLayer): + ''' + This is a wrapper around nn.TransformerEncoderLayer that adds a CLS token + to the sequence and outputs the CLS token's representation. + This base class parents both SpatialEncoderLayer and TemporalEncoderLayer for the RGB stream + and the FrequencyEncoderLayer and TemporalEncoderLayer for the audio stream stream. + We also, optionally, add a positional embedding to the input sequence which + allows to reuse it for global aggregation (of segments) for both streams. 
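+    Input is of shape (B, N, D) with an optional boolean keep-mask of shape (B, N);
+    the forward pass prepends a learnable CLS token (and, optionally, positional
+    embeddings) and returns that token's representation of shape (B, D).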
+ ''' + + def __init__(self, + add_pos_emb: bool = False, + pos_emb_drop: float = None, + pos_max_len: int = None, + *args_transformer_enc, + **kwargs_transformer_enc): + super().__init__(*args_transformer_enc, **kwargs_transformer_enc) + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.self_attn.embed_dim)) + trunc_normal_(self.cls_token, std=.02) + + # add positional embedding + self.add_pos_emb = add_pos_emb + if add_pos_emb: + self.pos_max_len = 1 + pos_max_len # +1 (for CLS) + self.pos_emb = nn.Parameter(torch.zeros(1, self.pos_max_len, self.self_attn.embed_dim)) + self.pos_drop = nn.Dropout(pos_emb_drop) + trunc_normal_(self.pos_emb, std=.02) + + self.apply(self._init_weights) + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None): + ''' x is of shape (B, N, D); if provided x_mask is of shape (B, N)''' + batch_dim = x.shape[0] + + # add CLS token + cls_tokens = self.cls_token.expand(batch_dim, -1, -1) # expanding to match batch dimension + x = torch.cat((cls_tokens, x), dim=-2) # (batch_dim, 1+seq_len, D) + if x_mask is not None: + cls_mask = torch.ones((batch_dim, 1), dtype=torch.bool, + device=x_mask.device) # 1=keep; 0=mask + x_mask_w_cls = torch.cat((cls_mask, x_mask), dim=-1) # (batch_dim, 1+seq_len) + B, N = x_mask_w_cls.shape + # torch expects (N, N) or (B*num_heads, N, N) mask (sadness ahead); torch masks + x_mask_w_cls = x_mask_w_cls.reshape(B, 1, 1, N)\ + .expand(-1, self.self_attn.num_heads, N, -1)\ + .reshape(B * self.self_attn.num_heads, N, N) + assert x_mask_w_cls.dtype == x_mask_w_cls.bool().dtype, 'x_mask_w_cls.dtype != bool' + x_mask_w_cls = ~x_mask_w_cls # invert mask (1=mask) + else: + x_mask_w_cls = None + + # add positional embedding + if self.add_pos_emb: + seq_len = x.shape[ + 1] # (don't even think about moving it before the CLS token concatenation) + assert seq_len <= self.pos_max_len, f'Seq len ({seq_len}) > pos_max_len ({self.pos_max_len})' + x = x + self.pos_emb[:, :seq_len, :] + x = self.pos_drop(x) + + # apply encoder layer (calls nn.TransformerEncoderLayer.forward); + x = super().forward(src=x, src_mask=x_mask_w_cls) # (batch_dim, 1+seq_len, D) + + # CLS token is expected to hold spatial information for each frame + x = x[:, 0, :] # (batch_dim, D) + + return x + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + return {'cls_token', 'pos_emb'} + + +class SpatialTransformerEncoderLayer(BaseEncoderLayer): + ''' Aggregates spatial dimensions by applying attention individually to each frame. ''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None) -> torch.Tensor: + ''' x is of shape (B*S, D, t, h, w) where S is the number of segments. + if specified x_mask (B*S, t, h, w), 0=masked, 1=kept + Returns a tensor of shape (B*S, t, D) pooling spatial information for each frame. 
''' + BS, D, t, h, w = x.shape + + # time as a batch dimension and flatten spatial dimensions as sequence + x = einops.rearrange(x, 'BS D t h w -> (BS t) (h w) D') + # similar to mask + if x_mask is not None: + x_mask = einops.rearrange(x_mask, 'BS t h w -> (BS t) (h w)') + + # apply encoder layer (BaseEncoderLayer.forward) - it will add CLS token and output its representation + x = super().forward(x=x, x_mask=x_mask) # (B*S*t, D) + + # reshape back to (B*S, t, D) + x = einops.rearrange(x, '(BS t) D -> BS t D', BS=BS, t=t) + + # (B*S, t, D) + return x + + +class TemporalTransformerEncoderLayer(BaseEncoderLayer): + ''' Aggregates temporal dimension with attention. Also used with pos emb as global aggregation + in both streams. ''' + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + ''' x is of shape (B*S, t, D) where S is the number of segments. + Returns a tensor of shape (B*S, D) pooling temporal information. ''' + BS, t, D = x.shape + + # apply encoder layer (BaseEncoderLayer.forward) - it will add CLS token and output its representation + x = super().forward(x) # (B*S, D) + + return x # (B*S, D) + + +class AveragePooling(nn.Module): + + def __init__(self, avg_pattern: str, then_permute_pattern: str = None) -> None: + ''' patterns are e.g. "bs t d -> bs d" ''' + super().__init__() + # TODO: need to register them as buffers (but fails because these are strings) + self.reduce_fn = 'mean' + self.avg_pattern = avg_pattern + self.then_permute_pattern = then_permute_pattern + + def forward(self, x: torch.Tensor, x_mask: torch.Tensor = None) -> torch.Tensor: + x = einops.reduce(x, self.avg_pattern, self.reduce_fn) + if self.then_permute_pattern is not None: + x = einops.rearrange(x, self.then_permute_pattern) + return x diff --git a/mmaudio/ext/synchformer/synchformer.py b/mmaudio/ext/synchformer/synchformer.py new file mode 100644 index 0000000000000000000000000000000000000000..80871f004d6f4c57f48594d90195f84f89d7cb0a --- /dev/null +++ b/mmaudio/ext/synchformer/synchformer.py @@ -0,0 +1,55 @@ +import logging +from typing import Any, Mapping + +import torch +from torch import nn + +from mmaudio.ext.synchformer.motionformer import MotionFormer + + +class Synchformer(nn.Module): + + def __init__(self): + super().__init__() + + self.vfeat_extractor = MotionFormer(extract_features=True, + factorize_space_time=True, + agg_space_module='TransformerEncoderLayer', + agg_time_module='torch.nn.Identity', + add_global_repr=False) + + # self.vfeat_extractor = instantiate_from_config(vfeat_extractor) + # self.afeat_extractor = instantiate_from_config(afeat_extractor) + # # bridging the s3d latent dim (1024) into what is specified in the config + # # to match e.g. the transformer dim + # self.vproj = instantiate_from_config(vproj) + # self.aproj = instantiate_from_config(aproj) + # self.transformer = instantiate_from_config(transformer) + + def forward(self, vis): + B, S, Tv, C, H, W = vis.shape + vis = vis.permute(0, 1, 3, 2, 4, 5) # (B, S, C, Tv, H, W) + # feat extractors return a tuple of segment-level and global features (ignored for sync) + # (B, S, tv, D), e.g. 
(B, 7, 8, 768) + vis = self.vfeat_extractor(vis) + return vis + + def load_state_dict(self, sd: Mapping[str, Any], strict: bool = True): + # discard all entries except vfeat_extractor + sd = {k: v for k, v in sd.items() if k.startswith('vfeat_extractor')} + + return super().load_state_dict(sd, strict) + + +if __name__ == "__main__": + model = Synchformer().cuda().eval() + sd = torch.load('./ext_weights/synchformer_state_dict.pth', weights_only=True) + model.load_state_dict(sd) + + vid = torch.randn(2, 7, 16, 3, 224, 224).cuda() + features = model.extract_vfeats(vid, for_loop=False).detach().cpu() + print(features.shape) + + # extract and save the state dict only + # sd = torch.load('./ext_weights/sync_model_audioset.pt')['model'] + # torch.save(sd, './ext_weights/synchformer_state_dict.pth') diff --git a/mmaudio/ext/synchformer/utils.py b/mmaudio/ext/synchformer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a797eb9c66f04b7c29934bfc384c935cdf441a62 --- /dev/null +++ b/mmaudio/ext/synchformer/utils.py @@ -0,0 +1,92 @@ +from hashlib import md5 +from pathlib import Path + +import requests +from tqdm import tqdm + +PARENT_LINK = 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a' +FNAME2LINK = { + # S3: Synchability: AudioSet (run 2) + '24-01-22T20-34-52.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-22T20-34-52/24-01-22T20-34-52.pt', + 'cfg-24-01-22T20-34-52.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-22T20-34-52/cfg-24-01-22T20-34-52.yaml', + # S2: Synchformer: AudioSet (run 2) + '24-01-04T16-39-21.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-04T16-39-21/24-01-04T16-39-21.pt', + 'cfg-24-01-04T16-39-21.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-04T16-39-21/cfg-24-01-04T16-39-21.yaml', + # S2: Synchformer: AudioSet (run 1) + '23-08-28T11-23-23.pt': + f'{PARENT_LINK}/sync/sync_models/23-08-28T11-23-23/23-08-28T11-23-23.pt', + 'cfg-23-08-28T11-23-23.yaml': + f'{PARENT_LINK}/sync/sync_models/23-08-28T11-23-23/cfg-23-08-28T11-23-23.yaml', + # S2: Synchformer: LRS3 (run 2) + '23-12-23T18-33-57.pt': + f'{PARENT_LINK}/sync/sync_models/23-12-23T18-33-57/23-12-23T18-33-57.pt', + 'cfg-23-12-23T18-33-57.yaml': + f'{PARENT_LINK}/sync/sync_models/23-12-23T18-33-57/cfg-23-12-23T18-33-57.yaml', + # S2: Synchformer: VGS (run 2) + '24-01-02T10-00-53.pt': + f'{PARENT_LINK}/sync/sync_models/24-01-02T10-00-53/24-01-02T10-00-53.pt', + 'cfg-24-01-02T10-00-53.yaml': + f'{PARENT_LINK}/sync/sync_models/24-01-02T10-00-53/cfg-24-01-02T10-00-53.yaml', + # SparseSync: ft VGGSound-Full + '22-09-21T21-00-52.pt': + f'{PARENT_LINK}/sync/sync_models/22-09-21T21-00-52/22-09-21T21-00-52.pt', + 'cfg-22-09-21T21-00-52.yaml': + f'{PARENT_LINK}/sync/sync_models/22-09-21T21-00-52/cfg-22-09-21T21-00-52.yaml', + # SparseSync: ft VGGSound-Sparse + '22-07-28T15-49-45.pt': + f'{PARENT_LINK}/sync/sync_models/22-07-28T15-49-45/22-07-28T15-49-45.pt', + 'cfg-22-07-28T15-49-45.yaml': + f'{PARENT_LINK}/sync/sync_models/22-07-28T15-49-45/cfg-22-07-28T15-49-45.yaml', + # SparseSync: only pt on LRS3 + '22-07-13T22-25-49.pt': + f'{PARENT_LINK}/sync/sync_models/22-07-13T22-25-49/22-07-13T22-25-49.pt', + 'cfg-22-07-13T22-25-49.yaml': + f'{PARENT_LINK}/sync/sync_models/22-07-13T22-25-49/cfg-22-07-13T22-25-49.yaml', + # SparseSync: feature extractors + 'ResNetAudio-22-08-04T09-51-04.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-08-04T09-51-04.pt', # 2s + 'ResNetAudio-22-08-03T23-14-49.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-08-03T23-14-49.pt', # 3s + 'ResNetAudio-22-08-03T23-14-28.pt': + 
f'{PARENT_LINK}/sync/ResNetAudio-22-08-03T23-14-28.pt', # 4s + 'ResNetAudio-22-06-24T08-10-33.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T08-10-33.pt', # 5s + 'ResNetAudio-22-06-24T17-31-07.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T17-31-07.pt', # 6s + 'ResNetAudio-22-06-24T23-57-11.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-24T23-57-11.pt', # 7s + 'ResNetAudio-22-06-25T04-35-42.pt': + f'{PARENT_LINK}/sync/ResNetAudio-22-06-25T04-35-42.pt', # 8s +} + + +def check_if_file_exists_else_download(path, fname2link=FNAME2LINK, chunk_size=1024): + '''Checks if file exists, if not downloads it from the link to the path''' + path = Path(path) + if not path.exists(): + path.parent.mkdir(exist_ok=True, parents=True) + link = fname2link.get(path.name, None) + if link is None: + raise ValueError(f'Cant find the checkpoint file: {path}.', + f'Please download it manually and ensure the path exists.') + with requests.get(fname2link[path.name], stream=True) as r: + total_size = int(r.headers.get('content-length', 0)) + with tqdm(total=total_size, unit='B', unit_scale=True) as pbar: + with open(path, 'wb') as f: + for data in r.iter_content(chunk_size=chunk_size): + if data: + f.write(data) + pbar.update(chunk_size) + + +def get_md5sum(path): + hash_md5 = md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: f.read(4096 * 8), b''): + hash_md5.update(chunk) + md5sum = hash_md5.hexdigest() + return md5sum diff --git a/mmaudio/ext/synchformer/video_model_builder.py b/mmaudio/ext/synchformer/video_model_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3defae4d07806086fd654906fab3d9f64ba4544f --- /dev/null +++ b/mmaudio/ext/synchformer/video_model_builder.py @@ -0,0 +1,277 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
+# Copyright 2020 Ross Wightman +# Modified Model definition + +from collections import OrderedDict +from functools import partial + +import torch +import torch.nn as nn +from timm.layers import trunc_normal_ + +from mmaudio.ext.synchformer import vit_helper + + +class VisionTransformer(nn.Module): + """ Vision Transformer with support for patch or hybrid CNN input stage """ + + def __init__(self, cfg): + super().__init__() + self.img_size = cfg.DATA.TRAIN_CROP_SIZE + self.patch_size = cfg.VIT.PATCH_SIZE + self.in_chans = cfg.VIT.CHANNELS + if cfg.TRAIN.DATASET == "Epickitchens": + self.num_classes = [97, 300] + else: + self.num_classes = cfg.MODEL.NUM_CLASSES + self.embed_dim = cfg.VIT.EMBED_DIM + self.depth = cfg.VIT.DEPTH + self.num_heads = cfg.VIT.NUM_HEADS + self.mlp_ratio = cfg.VIT.MLP_RATIO + self.qkv_bias = cfg.VIT.QKV_BIAS + self.drop_rate = cfg.VIT.DROP + self.drop_path_rate = cfg.VIT.DROP_PATH + self.head_dropout = cfg.VIT.HEAD_DROPOUT + self.video_input = cfg.VIT.VIDEO_INPUT + self.temporal_resolution = cfg.VIT.TEMPORAL_RESOLUTION + self.use_mlp = cfg.VIT.USE_MLP + self.num_features = self.embed_dim + norm_layer = partial(nn.LayerNorm, eps=1e-6) + self.attn_drop_rate = cfg.VIT.ATTN_DROPOUT + self.head_act = cfg.VIT.HEAD_ACT + self.cfg = cfg + + # Patch Embedding + self.patch_embed = vit_helper.PatchEmbed(img_size=224, + patch_size=self.patch_size, + in_chans=self.in_chans, + embed_dim=self.embed_dim) + + # 3D Patch Embedding + self.patch_embed_3d = vit_helper.PatchEmbed3D(img_size=self.img_size, + temporal_resolution=self.temporal_resolution, + patch_size=self.patch_size, + in_chans=self.in_chans, + embed_dim=self.embed_dim, + z_block_size=self.cfg.VIT.PATCH_SIZE_TEMP) + self.patch_embed_3d.proj.weight.data = torch.zeros_like( + self.patch_embed_3d.proj.weight.data) + + # Number of patches + if self.video_input: + num_patches = self.patch_embed.num_patches * self.temporal_resolution + else: + num_patches = self.patch_embed.num_patches + self.num_patches = num_patches + + # CLS token + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim)) + trunc_normal_(self.cls_token, std=.02) + + # Positional embedding + self.pos_embed = nn.Parameter( + torch.zeros(1, self.patch_embed.num_patches + 1, self.embed_dim)) + self.pos_drop = nn.Dropout(p=cfg.VIT.POS_DROPOUT) + trunc_normal_(self.pos_embed, std=.02) + + if self.cfg.VIT.POS_EMBED == "joint": + self.st_embed = nn.Parameter(torch.zeros(1, num_patches + 1, self.embed_dim)) + trunc_normal_(self.st_embed, std=.02) + elif self.cfg.VIT.POS_EMBED == "separate": + self.temp_embed = nn.Parameter(torch.zeros(1, self.temporal_resolution, self.embed_dim)) + + # Layer Blocks + dpr = [x.item() for x in torch.linspace(0, self.drop_path_rate, self.depth)] + if self.cfg.VIT.ATTN_LAYER == "divided": + self.blocks = nn.ModuleList([ + vit_helper.DividedSpaceTimeBlock( + attn_type=cfg.VIT.ATTN_LAYER, + dim=self.embed_dim, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + drop=self.drop_rate, + attn_drop=self.attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + ) for i in range(self.depth) + ]) + else: + self.blocks = nn.ModuleList([ + vit_helper.Block(attn_type=cfg.VIT.ATTN_LAYER, + dim=self.embed_dim, + num_heads=self.num_heads, + mlp_ratio=self.mlp_ratio, + qkv_bias=self.qkv_bias, + drop=self.drop_rate, + attn_drop=self.attn_drop_rate, + drop_path=dpr[i], + norm_layer=norm_layer, + use_original_code=self.cfg.VIT.USE_ORIGINAL_TRAJ_ATTN_CODE) + for i in range(self.depth) + ]) + self.norm = 
norm_layer(self.embed_dim) + + # MLP head + if self.use_mlp: + hidden_dim = self.embed_dim + if self.head_act == 'tanh': + # logging.info("Using TanH activation in MLP") + act = nn.Tanh() + elif self.head_act == 'gelu': + # logging.info("Using GELU activation in MLP") + act = nn.GELU() + else: + # logging.info("Using ReLU activation in MLP") + act = nn.ReLU() + self.pre_logits = nn.Sequential( + OrderedDict([ + ('fc', nn.Linear(self.embed_dim, hidden_dim)), + ('act', act), + ])) + else: + self.pre_logits = nn.Identity() + + # Classifier Head + self.head_drop = nn.Dropout(p=self.head_dropout) + if isinstance(self.num_classes, (list, )) and len(self.num_classes) > 1: + for a, i in enumerate(range(len(self.num_classes))): + setattr(self, "head%d" % a, nn.Linear(self.embed_dim, self.num_classes[i])) + else: + self.head = nn.Linear(self.embed_dim, + self.num_classes) if self.num_classes > 0 else nn.Identity() + + # Initialize weights + self.apply(self._init_weights) + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if isinstance(m, nn.Linear) and m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) + + @torch.jit.ignore + def no_weight_decay(self): + if self.cfg.VIT.POS_EMBED == "joint": + return {'pos_embed', 'cls_token', 'st_embed'} + else: + return {'pos_embed', 'cls_token', 'temp_embed'} + + def get_classifier(self): + return self.head + + def reset_classifier(self, num_classes, global_pool=''): + self.num_classes = num_classes + self.head = (nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()) + + def forward_features(self, x): + # if self.video_input: + # x = x[0] + B = x.shape[0] + + # Tokenize input + # if self.cfg.VIT.PATCH_SIZE_TEMP > 1: + # for simplicity of mapping between content dimensions (input x) and token dims (after patching) + # we use the same trick as for AST (see modeling_ast.ASTModel.forward for the details): + + # apply patching on input + x = self.patch_embed_3d(x) + tok_mask = None + + # else: + # tok_mask = None + # # 2D tokenization + # if self.video_input: + # x = x.permute(0, 2, 1, 3, 4) + # (B, T, C, H, W) = x.shape + # x = x.reshape(B * T, C, H, W) + + # x = self.patch_embed(x) + + # if self.video_input: + # (B2, T2, D2) = x.shape + # x = x.reshape(B, T * T2, D2) + + # Append CLS token + cls_tokens = self.cls_token.expand(B, -1, -1) + x = torch.cat((cls_tokens, x), dim=1) + # if tok_mask is not None: + # # prepend 1(=keep) to the mask to account for the CLS token as well + # tok_mask = torch.cat((torch.ones_like(tok_mask[:, [0]]), tok_mask), dim=1) + + # Interpolate positinoal embeddings + # if self.cfg.DATA.TRAIN_CROP_SIZE != 224: + # pos_embed = self.pos_embed + # N = pos_embed.shape[1] - 1 + # npatch = int((x.size(1) - 1) / self.temporal_resolution) + # class_emb = pos_embed[:, 0] + # pos_embed = pos_embed[:, 1:] + # dim = x.shape[-1] + # pos_embed = torch.nn.functional.interpolate( + # pos_embed.reshape(1, int(math.sqrt(N)), int(math.sqrt(N)), dim).permute(0, 3, 1, 2), + # scale_factor=math.sqrt(npatch / N), + # mode='bicubic', + # ) + # pos_embed = pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) + # new_pos_embed = torch.cat((class_emb.unsqueeze(0), pos_embed), dim=1) + # else: + new_pos_embed = self.pos_embed + npatch = self.patch_embed.num_patches + + # Add positional embeddings to input + if self.video_input: + if self.cfg.VIT.POS_EMBED == "separate": + cls_embed = self.pos_embed[:, 
0, :].unsqueeze(1) + tile_pos_embed = new_pos_embed[:, 1:, :].repeat(1, self.temporal_resolution, 1) + tile_temporal_embed = self.temp_embed.repeat_interleave(npatch, 1) + total_pos_embed = tile_pos_embed + tile_temporal_embed + total_pos_embed = torch.cat([cls_embed, total_pos_embed], dim=1) + x = x + total_pos_embed + elif self.cfg.VIT.POS_EMBED == "joint": + x = x + self.st_embed + else: + # image input + x = x + new_pos_embed + + # Apply positional dropout + x = self.pos_drop(x) + + # Encoding using transformer layers + for i, blk in enumerate(self.blocks): + x = blk(x, + seq_len=npatch, + num_frames=self.temporal_resolution, + approx=self.cfg.VIT.APPROX_ATTN_TYPE, + num_landmarks=self.cfg.VIT.APPROX_ATTN_DIM, + tok_mask=tok_mask) + + ### v-iashin: I moved it to the forward pass + # x = self.norm(x)[:, 0] + # x = self.pre_logits(x) + ### + return x, tok_mask + + # def forward(self, x): + # x = self.forward_features(x) + # ### v-iashin: here. This should leave the same forward output as before + # x = self.norm(x)[:, 0] + # x = self.pre_logits(x) + # ### + # x = self.head_drop(x) + # if isinstance(self.num_classes, (list, )) and len(self.num_classes) > 1: + # output = [] + # for head in range(len(self.num_classes)): + # x_out = getattr(self, "head%d" % head)(x) + # if not self.training: + # x_out = torch.nn.functional.softmax(x_out, dim=-1) + # output.append(x_out) + # return output + # else: + # x = self.head(x) + # if not self.training: + # x = torch.nn.functional.softmax(x, dim=-1) + # return x diff --git a/mmaudio/ext/synchformer/vit_helper.py b/mmaudio/ext/synchformer/vit_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..6af730a135bf49240ec439c81c9ad0aa5c9a505e --- /dev/null +++ b/mmaudio/ext/synchformer/vit_helper.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python3 +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 
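+# Helper modules (divided space-time attention blocks, patch embeddings, pretrained-weight loading) for the Synchformer ViT video encoder.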
+# Copyright 2020 Ross Wightman +# Modified Model definition +"""Video models.""" + +import math + +import torch +import torch.nn as nn +from einops import rearrange, repeat +from timm.layers import to_2tuple +from torch import einsum +from torch.nn import functional as F + +default_cfgs = { + 'vit_1k': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth', + 'vit_1k_large': + 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_224-4ee7a4dc.pth', +} + + +def qkv_attn(q, k, v, tok_mask: torch.Tensor = None): + sim = einsum('b i d, b j d -> b i j', q, k) + # apply masking if provided, tok_mask is (B*S*H, N): 1s - keep; sim is (B*S*H, H, N, N) + if tok_mask is not None: + BSH, N = tok_mask.shape + sim = sim.masked_fill(tok_mask.view(BSH, 1, N) == 0, + float('-inf')) # 1 - broadcasts across N + attn = sim.softmax(dim=-1) + out = einsum('b i j, b j d -> b i d', attn, v) + return out + + +class DividedAttention(nn.Module): + + def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = head_dim**-0.5 + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.proj = nn.Linear(dim, dim) + + # init to zeros + self.qkv.weight.data.fill_(0) + self.qkv.bias.data.fill_(0) + self.proj.weight.data.fill_(1) + self.proj.bias.data.fill_(0) + + self.attn_drop = nn.Dropout(attn_drop) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x, einops_from, einops_to, tok_mask: torch.Tensor = None, **einops_dims): + # num of heads variable + h = self.num_heads + + # project x to q, k, v vaalues + q, k, v = self.qkv(x).chunk(3, dim=-1) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + if tok_mask is not None: + # replicate token mask across heads (b, n) -> (b, h, n) -> (b*h, n) -- same as qkv but w/o d + assert len(tok_mask.shape) == 2 + tok_mask = tok_mask.unsqueeze(1).expand(-1, h, -1).reshape(-1, tok_mask.shape[1]) + + # Scale q + q *= self.scale + + # Take out cls_q, cls_k, cls_v + (cls_q, q_), (cls_k, k_), (cls_v, v_) = map(lambda t: (t[:, 0:1], t[:, 1:]), (q, k, v)) + # the same for masking + if tok_mask is not None: + cls_mask, mask_ = tok_mask[:, 0:1], tok_mask[:, 1:] + else: + cls_mask, mask_ = None, None + + # let CLS token attend to key / values of all patches across time and space + cls_out = qkv_attn(cls_q, k, v, tok_mask=tok_mask) + + # rearrange across time or space + q_, k_, v_ = map(lambda t: rearrange(t, f'{einops_from} -> {einops_to}', **einops_dims), + (q_, k_, v_)) + + # expand CLS token keys and values across time or space and concat + r = q_.shape[0] // cls_k.shape[0] + cls_k, cls_v = map(lambda t: repeat(t, 'b () d -> (b r) () d', r=r), (cls_k, cls_v)) + + k_ = torch.cat((cls_k, k_), dim=1) + v_ = torch.cat((cls_v, v_), dim=1) + + # the same for masking (if provided) + if tok_mask is not None: + # since mask does not have the latent dim (d), we need to remove it from einops dims + mask_ = rearrange(mask_, f'{einops_from} -> {einops_to}'.replace(' d', ''), + **einops_dims) + cls_mask = repeat(cls_mask, 'b () -> (b r) ()', + r=r) # expand cls_mask across time or space + mask_ = torch.cat((cls_mask, mask_), dim=1) + + # attention + out = qkv_attn(q_, k_, v_, tok_mask=mask_) + + # merge back time or space + out = rearrange(out, f'{einops_to} -> {einops_from}', **einops_dims) + + # concat back the cls token + out = torch.cat((cls_out, 
out), dim=1) + + # merge back the heads + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + + ## to out + x = self.proj(out) + x = self.proj_drop(x) + return x + + +class DividedSpaceTimeBlock(nn.Module): + + def __init__(self, + dim=768, + num_heads=12, + attn_type='divided', + mlp_ratio=4., + qkv_bias=False, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + + self.einops_from_space = 'b (f n) d' + self.einops_to_space = '(b f) n d' + self.einops_from_time = 'b (f n) d' + self.einops_to_time = '(b n) f d' + + self.norm1 = norm_layer(dim) + + self.attn = DividedAttention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + self.timeattn = DividedAttention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + attn_drop=attn_drop, + proj_drop=drop) + + # self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.drop_path = nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + self.norm3 = norm_layer(dim) + + def forward(self, + x, + seq_len=196, + num_frames=8, + approx='none', + num_landmarks=128, + tok_mask: torch.Tensor = None): + time_output = self.timeattn(self.norm3(x), + self.einops_from_time, + self.einops_to_time, + n=seq_len, + tok_mask=tok_mask) + time_residual = x + time_output + + space_output = self.attn(self.norm1(time_residual), + self.einops_from_space, + self.einops_to_space, + f=num_frames, + tok_mask=tok_mask) + space_residual = time_residual + self.drop_path(space_output) + + x = space_residual + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + """ + + def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768): + super().__init__() + img_size = img_size if type(img_size) is tuple else to_2tuple(img_size) + patch_size = img_size if type(patch_size) is tuple else to_2tuple(patch_size) + num_patches = (img_size[1] // patch_size[1]) * (img_size[0] // patch_size[0]) + self.img_size = img_size + self.patch_size = patch_size + self.num_patches = num_patches + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + + def forward(self, x): + B, C, H, W = x.shape + x = self.proj(x).flatten(2).transpose(1, 2) + return x + + +class PatchEmbed3D(nn.Module): + """ Image to Patch Embedding """ + + def __init__(self, + img_size=224, + temporal_resolution=4, + in_chans=3, + patch_size=16, + z_block_size=2, + embed_dim=768, + flatten=True): + super().__init__() + self.height = (img_size // patch_size) + self.width = (img_size // patch_size) + ### v-iashin: these two are incorrect + # self.frames = (temporal_resolution // z_block_size) + # self.num_patches = self.height * self.width * self.frames + self.z_block_size = z_block_size + ### + self.proj = 
nn.Conv3d(in_chans, + embed_dim, + kernel_size=(z_block_size, patch_size, patch_size), + stride=(z_block_size, patch_size, patch_size)) + self.flatten = flatten + + def forward(self, x): + B, C, T, H, W = x.shape + x = self.proj(x) + if self.flatten: + x = x.flatten(2).transpose(1, 2) + return x + + +class HeadMLP(nn.Module): + + def __init__(self, n_input, n_classes, n_hidden=512, p=0.1): + super(HeadMLP, self).__init__() + self.n_input = n_input + self.n_classes = n_classes + self.n_hidden = n_hidden + if n_hidden is None: + # use linear classifier + self.block_forward = nn.Sequential(nn.Dropout(p=p), + nn.Linear(n_input, n_classes, bias=True)) + else: + # use simple MLP classifier + self.block_forward = nn.Sequential(nn.Dropout(p=p), + nn.Linear(n_input, n_hidden, bias=True), + nn.BatchNorm1d(n_hidden), nn.ReLU(inplace=True), + nn.Dropout(p=p), + nn.Linear(n_hidden, n_classes, bias=True)) + print(f"Dropout-NLP: {p}") + + def forward(self, x): + return self.block_forward(x) + + +def _conv_filter(state_dict, patch_size=16): + """ convert patch embedding weight from manual patchify + linear proj to conv""" + out_dict = {} + for k, v in state_dict.items(): + if 'patch_embed.proj.weight' in k: + v = v.reshape((v.shape[0], 3, patch_size, patch_size)) + out_dict[k] = v + return out_dict + + +def adapt_input_conv(in_chans, conv_weight, agg='sum'): + conv_type = conv_weight.dtype + conv_weight = conv_weight.float() + O, I, J, K = conv_weight.shape + if in_chans == 1: + if I > 3: + assert conv_weight.shape[1] % 3 == 0 + # For models with space2depth stems + conv_weight = conv_weight.reshape(O, I // 3, 3, J, K) + conv_weight = conv_weight.sum(dim=2, keepdim=False) + else: + if agg == 'sum': + print("Summing conv1 weights") + conv_weight = conv_weight.sum(dim=1, keepdim=True) + else: + print("Averaging conv1 weights") + conv_weight = conv_weight.mean(dim=1, keepdim=True) + elif in_chans != 3: + if I != 3: + raise NotImplementedError('Weight format not supported by conversion.') + else: + if agg == 'sum': + print("Summing conv1 weights") + repeat = int(math.ceil(in_chans / 3)) + conv_weight = conv_weight.repeat(1, repeat, 1, 1)[:, :in_chans, :, :] + conv_weight *= (3 / float(in_chans)) + else: + print("Averaging conv1 weights") + conv_weight = conv_weight.mean(dim=1, keepdim=True) + conv_weight = conv_weight.repeat(1, in_chans, 1, 1) + conv_weight = conv_weight.to(conv_type) + return conv_weight + + +def load_pretrained(model, + cfg=None, + num_classes=1000, + in_chans=3, + filter_fn=None, + strict=True, + progress=False): + # Load state dict + assert (f"{cfg.VIT.PRETRAINED_WEIGHTS} not in [vit_1k, vit_1k_large]") + state_dict = torch.hub.load_state_dict_from_url(url=default_cfgs[cfg.VIT.PRETRAINED_WEIGHTS]) + + if filter_fn is not None: + state_dict = filter_fn(state_dict) + + input_convs = 'patch_embed.proj' + if input_convs is not None and in_chans != 3: + if isinstance(input_convs, str): + input_convs = (input_convs, ) + for input_conv_name in input_convs: + weight_name = input_conv_name + '.weight' + try: + state_dict[weight_name] = adapt_input_conv(in_chans, + state_dict[weight_name], + agg='avg') + print( + f'Converted input conv {input_conv_name} pretrained weights from 3 to {in_chans} channel(s)' + ) + except NotImplementedError as e: + del state_dict[weight_name] + strict = False + print( + f'Unable to convert pretrained {input_conv_name} weights, using random init for this layer.' 
+ ) + + classifier_name = 'head' + label_offset = cfg.get('label_offset', 0) + pretrain_classes = 1000 + if num_classes != pretrain_classes: + # completely discard fully connected if model num_classes doesn't match pretrained weights + del state_dict[classifier_name + '.weight'] + del state_dict[classifier_name + '.bias'] + strict = False + elif label_offset > 0: + # special case for pretrained weights with an extra background class in pretrained weights + classifier_weight = state_dict[classifier_name + '.weight'] + state_dict[classifier_name + '.weight'] = classifier_weight[label_offset:] + classifier_bias = state_dict[classifier_name + '.bias'] + state_dict[classifier_name + '.bias'] = classifier_bias[label_offset:] + + loaded_state = state_dict + self_state = model.state_dict() + all_names = set(self_state.keys()) + saved_names = set([]) + for name, param in loaded_state.items(): + param = param + if 'module.' in name: + name = name.replace('module.', '') + if name in self_state.keys() and param.shape == self_state[name].shape: + saved_names.add(name) + self_state[name].copy_(param) + else: + print(f"didnt load: {name} of shape: {param.shape}") + print("Missing Keys:") + print(all_names - saved_names) diff --git a/mmaudio/model/__init__.py b/mmaudio/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/model/embeddings.py b/mmaudio/model/embeddings.py new file mode 100644 index 0000000000000000000000000000000000000000..d447a98f941f1231d1b1dac716db3047a6a8eb88 --- /dev/null +++ b/mmaudio/model/embeddings.py @@ -0,0 +1,48 @@ +import torch +import torch.nn as nn + +# https://github.com/facebookresearch/DiT + + +class TimestepEmbedder(nn.Module): + """ + Embeds scalar timesteps into vector representations. + """ + + def __init__(self, dim, frequency_embedding_size, max_period): + super().__init__() + self.mlp = nn.Sequential( + nn.Linear(frequency_embedding_size, dim), + nn.SiLU(), + nn.Linear(dim, dim), + ) + self.dim = dim + self.max_period = max_period + assert dim % 2 == 0, 'dim must be even.' + + with torch.autocast('cuda', enabled=False): + self.freqs = ( + 1.0 / (10000**(torch.arange(0, frequency_embedding_size, 2, dtype=torch.float32) / + frequency_embedding_size))) + freq_scale = 10000 / max_period + self.freqs = nn.Parameter(freq_scale * self.freqs) + + def timestep_embedding(self, t): + """ + Create sinusoidal timestep embeddings. + :param t: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an (N, D) Tensor of positional embeddings. 
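+        (dim and max_period are fixed at construction time; only t is supplied per call.)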
+ """ + # https://github.com/openai/glide-text2im/blob/main/glide_text2im/nn.py + + args = t[:, None].float() * self.freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + return embedding + + def forward(self, t): + t_freq = self.timestep_embedding(t).to(t.dtype) + t_emb = self.mlp(t_freq) + return t_emb diff --git a/mmaudio/model/flow_matching.py b/mmaudio/model/flow_matching.py new file mode 100644 index 0000000000000000000000000000000000000000..a04510ab888c0c3c3398360f97b8b7e3c55998ad --- /dev/null +++ b/mmaudio/model/flow_matching.py @@ -0,0 +1,88 @@ +import logging +from typing import Callable, Iterable, Optional + +import torch +from torchdiffeq import odeint + +# from torchcfm.conditional_flow_matching import ExactOptimalTransportConditionalFlowMatcher + +log = logging.getLogger() + + +# Partially from https://github.com/gle-bellier/flow-matching +class FlowMatching: + + def __init__(self, min_sigma: float = 0.0, inference_mode='euler', num_steps: int = 25): + # inference_mode: 'euler' or 'adaptive' + # num_steps: number of steps in the euler inference mode + super().__init__() + self.min_sigma = min_sigma + self.inference_mode = inference_mode + self.num_steps = num_steps + + # self.fm = ExactOptimalTransportConditionalFlowMatcher(sigma=min_sigma) + + assert self.inference_mode in ['euler', 'adaptive'] + if self.inference_mode == 'adaptive' and num_steps > 0: + log.info('The number of steps is ignored in adaptive inference mode ') + + def get_conditional_flow(self, x0: torch.Tensor, x1: torch.Tensor, + t: torch.Tensor) -> torch.Tensor: + # which is psi_t(x), eq 22 in flow matching for generative models + t = t[:, None, None].expand_as(x0) + return (1 - (1 - self.min_sigma) * t) * x0 + t * x1 + + def loss(self, predicted_v: torch.Tensor, x0: torch.Tensor, x1: torch.Tensor) -> torch.Tensor: + # return the mean error without reducing the batch dimension + reduce_dim = list(range(1, len(predicted_v.shape))) + target_v = x1 - (1 - self.min_sigma) * x0 + return (predicted_v - target_v).pow(2).mean(dim=reduce_dim) + + def get_x0_xt_c( + self, + x1: torch.Tensor, + t: torch.Tensor, + Cs: list[torch.Tensor], + generator: Optional[torch.Generator] = None + ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + # x0 = torch.randn_like(x1, generator=generator) + x0 = torch.empty_like(x1).normal_(generator=generator) + + # find mini-batch optimal transport + # x0, x1, _, Cs = self.fm.ot_sampler.sample_plan_with_labels(x0, x1, None, Cs, replace=True) + + xt = self.get_conditional_flow(x0, x1, t) + return x0, x1, xt, Cs + + def to_prior(self, fn: Callable, x1: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x1, 1, 0) + + def to_data(self, fn: Callable, x0: torch.Tensor) -> torch.Tensor: + return self.run_t0_to_t1(fn, x0, 0, 1) + + def run_t0_to_t1(self, fn: Callable, x0: torch.Tensor, t0: float, t1: float) -> torch.Tensor: + # fn: a function that takes (t, x) and returns the direction x0->x1 + + if self.inference_mode == 'adaptive': + return odeint(fn, x0, torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype)) + elif self.inference_mode == 'euler': + x = x0 + steps = torch.linspace(t0, t1 - self.min_sigma, self.num_steps + 1) + for ti, t in enumerate(steps[:-1]): + flow = fn(t, x) + next_t = steps[ti + 1] + dt = next_t - t + x = x + dt * flow + + # return odeint(fn, + # x0, + # torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype), + # method='rk4', + # options=dict(step_size=(t1 - t0) / self.num_steps))[-1] + # return odeint(fn, + # 
x0, + # torch.tensor([t0, t1], device=x0.device, dtype=x0.dtype), + # method='euler', + # options=dict(step_size=(t1 - t0) / self.num_steps))[-1] + + return x diff --git a/mmaudio/model/low_level.py b/mmaudio/model/low_level.py new file mode 100644 index 0000000000000000000000000000000000000000..c8326a8bec99f1be08b92e76fda4b59e777b39d2 --- /dev/null +++ b/mmaudio/model/low_level.py @@ -0,0 +1,95 @@ +import torch +from torch import nn +from torch.nn import functional as F + + +class ChannelLastConv1d(nn.Conv1d): + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = x.permute(0, 2, 1) + x = super().forward(x) + x = x.permute(0, 2, 1) + return x + + +# https://github.com/Stability-AI/sd3-ref +class MLP(nn.Module): + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. + + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = nn.Linear(dim, hidden_dim, bias=False) + self.w2 = nn.Linear(hidden_dim, dim, bias=False) + self.w3 = nn.Linear(dim, hidden_dim, bias=False) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) + + +class ConvMLP(nn.Module): + + def __init__( + self, + dim: int, + hidden_dim: int, + multiple_of: int = 256, + kernel_size: int = 3, + padding: int = 1, + ): + """ + Initialize the FeedForward module. + + Args: + dim (int): Input dimension. + hidden_dim (int): Hidden dimension of the feedforward layer. + multiple_of (int): Value to ensure hidden dimension is a multiple of this value. + + Attributes: + w1 (ColumnParallelLinear): Linear transformation for the first layer. + w2 (RowParallelLinear): Linear transformation for the second layer. + w3 (ColumnParallelLinear): Linear transformation for the third layer. 
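+        Note: the forward pass computes a SwiGLU-style feed-forward, w2(SiLU(w1(x)) * w3(x)), using 1-D convolutions applied along the sequence dimension.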
+ + """ + super().__init__() + hidden_dim = int(2 * hidden_dim / 3) + hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) + + self.w1 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w2 = ChannelLastConv1d(hidden_dim, + dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + self.w3 = ChannelLastConv1d(dim, + hidden_dim, + bias=False, + kernel_size=kernel_size, + padding=padding) + + def forward(self, x): + return self.w2(F.silu(self.w1(x)) * self.w3(x)) diff --git a/mmaudio/model/networks.py b/mmaudio/model/networks.py new file mode 100644 index 0000000000000000000000000000000000000000..e60e309c89d92cec70e7e673a4e842cc6716fae9 --- /dev/null +++ b/mmaudio/model/networks.py @@ -0,0 +1,471 @@ +import logging +from dataclasses import dataclass +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from mmaudio.ext.rotary_embeddings import compute_rope_rotations +from mmaudio.model.embeddings import TimestepEmbedder +from mmaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP +from mmaudio.model.transformer_layers import (FinalBlock, JointBlock, MMDitSingleBlock) + +log = logging.getLogger() + + +@dataclass +class PreprocessedConditions: + clip_f: torch.Tensor + sync_f: torch.Tensor + text_f: torch.Tensor + clip_f_c: torch.Tensor + text_f_c: torch.Tensor + + +# Partially from https://github.com/facebookresearch/DiT +class MMAudio(nn.Module): + + def __init__(self, + *, + latent_dim: int, + clip_dim: int, + sync_dim: int, + text_dim: int, + hidden_dim: int, + depth: int, + fused_depth: int, + num_heads: int, + mlp_ratio: float = 4.0, + latent_seq_len: int, + clip_seq_len: int, + sync_seq_len: int, + text_seq_len: int = 77, + latent_mean: Optional[torch.Tensor] = None, + latent_std: Optional[torch.Tensor] = None, + empty_string_feat: Optional[torch.Tensor] = None, + v2: bool = False) -> None: + super().__init__() + + self.v2 = v2 + self.latent_dim = latent_dim + self._latent_seq_len = latent_seq_len + self._clip_seq_len = clip_seq_len + self._sync_seq_len = sync_seq_len + self._text_seq_len = text_seq_len + self.hidden_dim = hidden_dim + self.num_heads = num_heads + + if v2: + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.clip_input_proj = nn.Sequential( + nn.Linear(clip_dim, hidden_dim), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.sync_input_proj = nn.Sequential( + ChannelLastConv1d(sync_dim, hidden_dim, kernel_size=7, padding=3), + nn.SiLU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.text_input_proj = nn.Sequential( + nn.Linear(text_dim, hidden_dim), + nn.SiLU(), + MLP(hidden_dim, hidden_dim * 4), + ) + else: + self.audio_input_proj = nn.Sequential( + ChannelLastConv1d(latent_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=7, padding=3), + ) + + self.clip_input_proj = nn.Sequential( + nn.Linear(clip_dim, hidden_dim), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.sync_input_proj = nn.Sequential( + ChannelLastConv1d(sync_dim, hidden_dim, kernel_size=7, padding=3), + nn.SELU(), + ConvMLP(hidden_dim, hidden_dim * 4, kernel_size=3, padding=1), + ) + + self.text_input_proj = nn.Sequential( + nn.Linear(text_dim, hidden_dim), + 
MLP(hidden_dim, hidden_dim * 4), + ) + + self.clip_cond_proj = nn.Linear(hidden_dim, hidden_dim) + self.text_cond_proj = nn.Linear(hidden_dim, hidden_dim) + self.global_cond_mlp = MLP(hidden_dim, hidden_dim * 4) + # each synchformer output segment has 8 feature frames + self.sync_pos_emb = nn.Parameter(torch.zeros((1, 1, 8, sync_dim))) + + self.final_layer = FinalBlock(hidden_dim, latent_dim) + + if v2: + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=hidden_dim, + max_period=1) + else: + self.t_embed = TimestepEmbedder(hidden_dim, + frequency_embedding_size=256, + max_period=10000) + self.joint_blocks = nn.ModuleList([ + JointBlock(hidden_dim, + num_heads, + mlp_ratio=mlp_ratio, + pre_only=(i == depth - fused_depth - 1)) for i in range(depth - fused_depth) + ]) + + self.fused_blocks = nn.ModuleList([ + MMDitSingleBlock(hidden_dim, num_heads, mlp_ratio=mlp_ratio, kernel_size=3, padding=1) + for i in range(fused_depth) + ]) + + if latent_mean is None: + # these values are not meant to be used + # if you don't provide mean/std here, we should load them later from a checkpoint + assert latent_std is None + latent_mean = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + latent_std = torch.ones(latent_dim).view(1, 1, -1).fill_(float('nan')) + else: + assert latent_std is not None + assert latent_mean.numel() == latent_dim, f'{latent_mean.numel()=} != {latent_dim=}' + if empty_string_feat is None: + empty_string_feat = torch.zeros((text_seq_len, text_dim)) + self.latent_mean = nn.Parameter(latent_mean.view(1, 1, -1), requires_grad=False) + self.latent_std = nn.Parameter(latent_std.view(1, 1, -1), requires_grad=False) + + self.empty_string_feat = nn.Parameter(empty_string_feat, requires_grad=False) + self.empty_clip_feat = nn.Parameter(torch.zeros(1, clip_dim), requires_grad=True) + self.empty_sync_feat = nn.Parameter(torch.zeros(1, sync_dim), requires_grad=True) + + self.initialize_weights() + self.initialize_rotations() + + def initialize_rotations(self): + base_freq = 1.0 + latent_rot = compute_rope_rotations(self._latent_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq, + device=self.device) + clip_rot = compute_rope_rotations(self._clip_seq_len, + self.hidden_dim // self.num_heads, + 10000, + freq_scaling=base_freq * self._latent_seq_len / + self._clip_seq_len, + device=self.device) + + # self.latent_rot = latent_rot.to(self.device) + # self.clip_rot = clip_rot.to(self.device) + self.register_buffer('latent_rot', latent_rot) + self.register_buffer('clip_rot', clip_rot) + + def update_seq_lengths(self, latent_seq_len: int, clip_seq_len: int, sync_seq_len: int) -> None: + self._latent_seq_len = latent_seq_len + self._clip_seq_len = clip_seq_len + self._sync_seq_len = sync_seq_len + self.initialize_rotations() + + def initialize_weights(self): + + def _basic_init(module): + if isinstance(module, nn.Linear): + torch.nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + # Initialize timestep embedding MLP: + nn.init.normal_(self.t_embed.mlp[0].weight, std=0.02) + nn.init.normal_(self.t_embed.mlp[2].weight, std=0.02) + + # Zero-out adaLN modulation layers in DiT blocks: + for block in self.joint_blocks: + nn.init.constant_(block.latent_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.latent_block.adaLN_modulation[-1].bias, 0) + nn.init.constant_(block.clip_block.adaLN_modulation[-1].weight, 0) + 
nn.init.constant_(block.clip_block.adaLN_modulation[-1].bias, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.text_block.adaLN_modulation[-1].bias, 0) + for block in self.fused_blocks: + nn.init.constant_(block.adaLN_modulation[-1].weight, 0) + nn.init.constant_(block.adaLN_modulation[-1].bias, 0) + + # Zero-out output layers: + nn.init.constant_(self.final_layer.adaLN_modulation[-1].weight, 0) + nn.init.constant_(self.final_layer.adaLN_modulation[-1].bias, 0) + nn.init.constant_(self.final_layer.conv.weight, 0) + nn.init.constant_(self.final_layer.conv.bias, 0) + + # empty string feat shall be initialized by a CLIP encoder + nn.init.constant_(self.sync_pos_emb, 0) + nn.init.constant_(self.empty_clip_feat, 0) + nn.init.constant_(self.empty_sync_feat, 0) + + def normalize(self, x: torch.Tensor) -> torch.Tensor: + # return (x - self.latent_mean) / self.latent_std + return x.sub_(self.latent_mean).div_(self.latent_std) + + def unnormalize(self, x: torch.Tensor) -> torch.Tensor: + # return x * self.latent_std + self.latent_mean + return x.mul_(self.latent_std).add_(self.latent_mean) + + def preprocess_conditions(self, clip_f: torch.Tensor, sync_f: torch.Tensor, + text_f: torch.Tensor) -> PreprocessedConditions: + """ + cache computations that do not depend on the latent/time step + i.e., the features are reused over steps during inference + """ + assert clip_f.shape[1] == self._clip_seq_len, f'{clip_f.shape=} {self._clip_seq_len=}' + assert sync_f.shape[1] == self._sync_seq_len, f'{sync_f.shape=} {self._sync_seq_len=}' + assert text_f.shape[1] == self._text_seq_len, f'{text_f.shape=} {self._text_seq_len=}' + + bs = clip_f.shape[0] + + # B * num_segments (24) * 8 * 768 + num_sync_segments = self._sync_seq_len // 8 + sync_f = sync_f.view(bs, num_sync_segments, 8, -1) + self.sync_pos_emb + sync_f = sync_f.flatten(1, 2) # (B, VN, D) + + # extend vf to match x + clip_f = self.clip_input_proj(clip_f) # (B, VN, D) + sync_f = self.sync_input_proj(sync_f) # (B, VN, D) + text_f = self.text_input_proj(text_f) # (B, VN, D) + + # upsample the sync features to match the audio + sync_f = sync_f.transpose(1, 2) # (B, D, VN) + sync_f = F.interpolate(sync_f, size=self._latent_seq_len, mode='nearest-exact') + sync_f = sync_f.transpose(1, 2) # (B, N, D) + + # get conditional features from the clip side + clip_f_c = self.clip_cond_proj(clip_f.mean(dim=1)) # (B, D) + text_f_c = self.text_cond_proj(text_f.mean(dim=1)) # (B, D) + + return PreprocessedConditions(clip_f=clip_f, + sync_f=sync_f, + text_f=text_f, + clip_f_c=clip_f_c, + text_f_c=text_f_c) + + def predict_flow(self, latent: torch.Tensor, t: torch.Tensor, + conditions: PreprocessedConditions) -> torch.Tensor: + """ + for non-cacheable computations + """ + assert latent.shape[1] == self._latent_seq_len, f'{latent.shape=} {self._latent_seq_len=}' + + clip_f = conditions.clip_f + sync_f = conditions.sync_f + text_f = conditions.text_f + clip_f_c = conditions.clip_f_c + text_f_c = conditions.text_f_c + + latent = self.audio_input_proj(latent) # (B, N, D) + global_c = self.global_cond_mlp(clip_f_c + text_f_c) # (B, D) + + global_c = self.t_embed(t).unsqueeze(1) + global_c.unsqueeze(1) # (B, D) + extended_c = global_c + sync_f + + for block in self.joint_blocks: + latent, clip_f, text_f = block(latent, clip_f, text_f, global_c, extended_c, + self.latent_rot, self.clip_rot) # (B, N, D) + + for block in self.fused_blocks: + latent = block(latent, extended_c, self.latent_rot) + + flow = self.final_layer(latent, 
global_c) # (B, N, out_dim), remove t + return flow + + def forward(self, latent: torch.Tensor, clip_f: torch.Tensor, sync_f: torch.Tensor, + text_f: torch.Tensor, t: torch.Tensor) -> torch.Tensor: + """ + latent: (B, N, C) + vf: (B, T, C_V) + t: (B,) + """ + conditions = self.preprocess_conditions(clip_f, sync_f, text_f) + flow = self.predict_flow(latent, t, conditions) + return flow + + def get_empty_string_sequence(self, bs: int) -> torch.Tensor: + return self.empty_string_feat.unsqueeze(0).expand(bs, -1, -1) + + def get_empty_clip_sequence(self, bs: int) -> torch.Tensor: + return self.empty_clip_feat.unsqueeze(0).expand(bs, self._clip_seq_len, -1) + + def get_empty_sync_sequence(self, bs: int) -> torch.Tensor: + return self.empty_sync_feat.unsqueeze(0).expand(bs, self._sync_seq_len, -1) + + def get_empty_conditions( + self, + bs: int, + *, + negative_text_features: Optional[torch.Tensor] = None) -> PreprocessedConditions: + if negative_text_features is not None: + empty_text = negative_text_features + else: + empty_text = self.get_empty_string_sequence(1) + + empty_clip = self.get_empty_clip_sequence(1) + empty_sync = self.get_empty_sync_sequence(1) + conditions = self.preprocess_conditions(empty_clip, empty_sync, empty_text) + conditions.clip_f = conditions.clip_f.expand(bs, -1, -1) + conditions.sync_f = conditions.sync_f.expand(bs, -1, -1) + conditions.clip_f_c = conditions.clip_f_c.expand(bs, -1) + if negative_text_features is None: + conditions.text_f = conditions.text_f.expand(bs, -1, -1) + conditions.text_f_c = conditions.text_f_c.expand(bs, -1) + + return conditions + + def ode_wrapper(self, t: torch.Tensor, latent: torch.Tensor, conditions: PreprocessedConditions, + empty_conditions: PreprocessedConditions, cfg_strength: float) -> torch.Tensor: + t = t * torch.ones(len(latent), device=latent.device, dtype=latent.dtype) + + if cfg_strength < 1.0: + return self.predict_flow(latent, t, conditions) + else: + return (cfg_strength * self.predict_flow(latent, t, conditions) + + (1 - cfg_strength) * self.predict_flow(latent, t, empty_conditions)) + + def load_weights(self, src_dict) -> None: + if 't_embed.freqs' in src_dict: + del src_dict['t_embed.freqs'] + if 'latent_rot' in src_dict: + del src_dict['latent_rot'] + if 'clip_rot' in src_dict: + del src_dict['clip_rot'] + + self.load_state_dict(src_dict, strict=False) + + @property + def device(self) -> torch.device: + return self.latent_mean.device + + @property + def latent_seq_len(self) -> int: + return self._latent_seq_len + + @property + def clip_seq_len(self) -> int: + return self._clip_seq_len + + @property + def sync_seq_len(self) -> int: + return self._sync_seq_len + + +def small_16k(**kwargs) -> MMAudio: + num_heads = 7 + return MMAudio(latent_dim=20, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=250, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def small_44k(**kwargs) -> MMAudio: + num_heads = 7 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def medium_44k(**kwargs) -> MMAudio: + num_heads = 14 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=12, + fused_depth=8, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + 
**kwargs) + + +def large_44k(**kwargs) -> MMAudio: + num_heads = 14 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=21, + fused_depth=14, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + **kwargs) + + +def large_44k_v2(**kwargs) -> MMAudio: + num_heads = 14 + return MMAudio(latent_dim=40, + clip_dim=1024, + sync_dim=768, + text_dim=1024, + hidden_dim=64 * num_heads, + depth=21, + fused_depth=14, + num_heads=num_heads, + latent_seq_len=345, + clip_seq_len=64, + sync_seq_len=192, + v2=True, + **kwargs) + + +def get_my_mmaudio(name: str, **kwargs) -> MMAudio: + if name == 'small_16k': + return small_16k(**kwargs) + if name == 'small_44k': + return small_44k(**kwargs) + if name == 'medium_44k': + return medium_44k(**kwargs) + if name == 'large_44k': + return large_44k(**kwargs) + if name == 'large_44k_v2': + return large_44k_v2(**kwargs) + + raise ValueError(f'Unknown model name: {name}') + + +if __name__ == '__main__': + network = get_my_mmaudio('small_16k') + + # print the number of parameters in terms of millions + num_params = sum(p.numel() for p in network.parameters()) / 1e6 + print(f'Number of parameters: {num_params:.2f}M') diff --git a/mmaudio/model/sequence_config.py b/mmaudio/model/sequence_config.py new file mode 100644 index 0000000000000000000000000000000000000000..14269014dc401b4751d172466813a935fddda6c1 --- /dev/null +++ b/mmaudio/model/sequence_config.py @@ -0,0 +1,58 @@ +import dataclasses +import math + + +@dataclasses.dataclass +class SequenceConfig: + # general + duration: float + + # audio + sampling_rate: int + spectrogram_frame_rate: int + latent_downsample_rate: int = 2 + + # visual + clip_frame_rate: int = 8 + sync_frame_rate: int = 25 + sync_num_frames_per_segment: int = 16 + sync_step_size: int = 8 + sync_downsample_rate: int = 2 + + @property + def num_audio_frames(self) -> int: + # we need an integer number of latents + return self.latent_seq_len * self.spectrogram_frame_rate * self.latent_downsample_rate + + @property + def latent_seq_len(self) -> int: + return int( + math.ceil(self.duration * self.sampling_rate / self.spectrogram_frame_rate / + self.latent_downsample_rate)) + + @property + def clip_seq_len(self) -> int: + return int(self.duration * self.clip_frame_rate) + + @property + def sync_seq_len(self) -> int: + num_frames = self.duration * self.sync_frame_rate + num_segments = (num_frames - self.sync_num_frames_per_segment) // self.sync_step_size + 1 + return int(num_segments * self.sync_num_frames_per_segment / self.sync_downsample_rate) + + +CONFIG_16K = SequenceConfig(duration=8.0, sampling_rate=16000, spectrogram_frame_rate=256) +CONFIG_44K = SequenceConfig(duration=8.0, sampling_rate=44100, spectrogram_frame_rate=512) + +if __name__ == '__main__': + assert CONFIG_16K.latent_seq_len == 250 + assert CONFIG_16K.clip_seq_len == 64 + assert CONFIG_16K.sync_seq_len == 192 + assert CONFIG_16K.num_audio_frames == 128000 + + assert CONFIG_44K.latent_seq_len == 345 + assert CONFIG_44K.clip_seq_len == 64 + assert CONFIG_44K.sync_seq_len == 192 + assert CONFIG_44K.num_audio_frames == 353280 + + print('Passed') diff --git a/mmaudio/model/transformer_layers.py b/mmaudio/model/transformer_layers.py new file mode 100644 index 0000000000000000000000000000000000000000..3ca02ec3b6c00b9c39624d97d55a211cdd2e427d --- /dev/null +++ b/mmaudio/model/transformer_layers.py @@ -0,0 +1,203 @@ +from typing import Optional + +import torch +import torch.nn as nn +import 
torch.nn.functional as F +from einops import rearrange +from einops.layers.torch import Rearrange +from torch.nn.attention import SDPBackend, sdpa_kernel + +from mmaudio.ext.rotary_embeddings import apply_rope +from mmaudio.model.low_level import MLP, ChannelLastConv1d, ConvMLP + + +def modulate(x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor): + return x * (1 + scale) + shift + + +def attention(q: torch.Tensor, k: torch.Tensor, v: torch.Tensor): + # training will crash without these contiguous calls and the CUDNN limitation + # I believe this is related to https://github.com/pytorch/pytorch/issues/133974 + # unresolved at the time of writing + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = F.scaled_dot_product_attention(q, k, v) + out = rearrange(out, 'b h n d -> b n (h d)').contiguous() + return out + + +class SelfAttention(nn.Module): + + def __init__(self, dim: int, nheads: int): + super().__init__() + self.dim = dim + self.nheads = nheads + + self.qkv = nn.Linear(dim, dim * 3, bias=True) + self.q_norm = nn.RMSNorm(dim // nheads) + self.k_norm = nn.RMSNorm(dim // nheads) + + self.split_into_heads = Rearrange('b n (h d j) -> b h n d j', + h=nheads, + d=dim // nheads, + j=3) + + def pre_attention( + self, x: torch.Tensor, + rot: Optional[torch.Tensor]) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + # x: batch_size * n_tokens * n_channels + qkv = self.qkv(x) + q, k, v = self.split_into_heads(qkv).chunk(3, dim=-1) + q = q.squeeze(-1) + k = k.squeeze(-1) + v = v.squeeze(-1) + q = self.q_norm(q) + k = self.k_norm(k) + + if rot is not None: + q = apply_rope(q, rot) + k = apply_rope(k, rot) + + return q, k, v + + def forward( + self, + x: torch.Tensor, # batch_size * n_tokens * n_channels + ) -> torch.Tensor: + q, v, k = self.pre_attention(x) + out = attention(q, k, v) + return out + + +class MMDitSingleBlock(nn.Module): + + def __init__(self, + dim: int, + nhead: int, + mlp_ratio: float = 4.0, + pre_only: bool = False, + kernel_size: int = 7, + padding: int = 3): + super().__init__() + self.norm1 = nn.LayerNorm(dim, elementwise_affine=False) + self.attn = SelfAttention(dim, nhead) + + self.pre_only = pre_only + if pre_only: + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 2 * dim, bias=True)) + else: + if kernel_size == 1: + self.linear1 = nn.Linear(dim, dim) + else: + self.linear1 = ChannelLastConv1d(dim, dim, kernel_size=kernel_size, padding=padding) + self.norm2 = nn.LayerNorm(dim, elementwise_affine=False) + + if kernel_size == 1: + self.ffn = MLP(dim, int(dim * mlp_ratio)) + else: + self.ffn = ConvMLP(dim, + int(dim * mlp_ratio), + kernel_size=kernel_size, + padding=padding) + + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 6 * dim, bias=True)) + + def pre_attention(self, x: torch.Tensor, c: torch.Tensor, rot: Optional[torch.Tensor]): + # x: BS * N * D + # cond: BS * D + modulation = self.adaLN_modulation(c) + if self.pre_only: + (shift_msa, scale_msa) = modulation.chunk(2, dim=-1) + gate_msa = shift_mlp = scale_mlp = gate_mlp = None + else: + (shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, + gate_mlp) = modulation.chunk(6, dim=-1) + + x = modulate(self.norm1(x), shift_msa, scale_msa) + q, k, v = self.attn.pre_attention(x, rot) + return (q, k, v), (gate_msa, shift_mlp, scale_mlp, gate_mlp) + + def post_attention(self, x: torch.Tensor, attn_out: torch.Tensor, c: tuple[torch.Tensor]): + if self.pre_only: + return x + + (gate_msa, shift_mlp, scale_mlp, gate_mlp) = c + x = x + self.linear1(attn_out) * gate_msa + 
r = modulate(self.norm2(x), shift_mlp, scale_mlp) + x = x + self.ffn(r) * gate_mlp + + return x + + def forward(self, x: torch.Tensor, cond: torch.Tensor, + rot: Optional[torch.Tensor]) -> torch.Tensor: + # x: BS * N * D + # cond: BS * D + x_qkv, x_conditions = self.pre_attention(x, cond, rot) + attn_out = attention(*x_qkv) + x = self.post_attention(x, attn_out, x_conditions) + + return x + + +class JointBlock(nn.Module): + + def __init__(self, dim: int, nhead: int, mlp_ratio: float = 4.0, pre_only: bool = False): + super().__init__() + self.pre_only = pre_only + self.latent_block = MMDitSingleBlock(dim, + nhead, + mlp_ratio, + pre_only=False, + kernel_size=3, + padding=1) + self.clip_block = MMDitSingleBlock(dim, + nhead, + mlp_ratio, + pre_only=pre_only, + kernel_size=3, + padding=1) + self.text_block = MMDitSingleBlock(dim, nhead, mlp_ratio, pre_only=pre_only, kernel_size=1) + + def forward(self, latent: torch.Tensor, clip_f: torch.Tensor, text_f: torch.Tensor, + global_c: torch.Tensor, extended_c: torch.Tensor, latent_rot: torch.Tensor, + clip_rot: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]: + # latent: BS * N1 * D + # clip_f: BS * N2 * D + # c: BS * (1/N) * D + x_qkv, x_mod = self.latent_block.pre_attention(latent, extended_c, latent_rot) + c_qkv, c_mod = self.clip_block.pre_attention(clip_f, global_c, clip_rot) + t_qkv, t_mod = self.text_block.pre_attention(text_f, global_c, rot=None) + + latent_len = latent.shape[1] + clip_len = clip_f.shape[1] + text_len = text_f.shape[1] + + joint_qkv = [torch.cat([x_qkv[i], c_qkv[i], t_qkv[i]], dim=2) for i in range(3)] + + attn_out = attention(*joint_qkv) + x_attn_out = attn_out[:, :latent_len] + c_attn_out = attn_out[:, latent_len:latent_len + clip_len] + t_attn_out = attn_out[:, latent_len + clip_len:] + + latent = self.latent_block.post_attention(latent, x_attn_out, x_mod) + if not self.pre_only: + clip_f = self.clip_block.post_attention(clip_f, c_attn_out, c_mod) + text_f = self.text_block.post_attention(text_f, t_attn_out, t_mod) + + return latent, clip_f, text_f + + +class FinalBlock(nn.Module): + + def __init__(self, dim, out_dim): + super().__init__() + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim, 2 * dim, bias=True)) + self.norm = nn.LayerNorm(dim, elementwise_affine=False) + self.conv = ChannelLastConv1d(dim, out_dim, kernel_size=7, padding=3) + + def forward(self, latent, c): + shift, scale = self.adaLN_modulation(c).chunk(2, dim=-1) + latent = modulate(self.norm(latent), shift, scale) + latent = self.conv(latent) + return latent diff --git a/mmaudio/model/utils/__init__.py b/mmaudio/model/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/model/utils/distributions.py b/mmaudio/model/utils/distributions.py new file mode 100644 index 0000000000000000000000000000000000000000..1d526a5b0b3dd2ae556d806a3397e1cf43c07fb9 --- /dev/null +++ b/mmaudio/model/utils/distributions.py @@ -0,0 +1,46 @@ +from typing import Optional + +import numpy as np +import torch + + +class DiagonalGaussianDistribution: + + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) 
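+    # sample() draws x = mean + std * eps with eps ~ N(0, I) (the reparameterization trick);
+    # kl() and nll() return the closed-form Gaussian KL term and negative log-likelihood.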
+ + def sample(self, rng: Optional[torch.Generator] = None): + # x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + + r = torch.empty_like(self.mean).normal_(generator=rng) + x = self.mean + self.std * r + + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + + return 0.5 * torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar + else: + return 0.5 * (torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar) + + def nll(self, sample, dims=[1, 2, 3]): + if self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean diff --git a/mmaudio/model/utils/features_utils.py b/mmaudio/model/utils/features_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8b5ebcf685d98d9f024ce29df239e93312418bae --- /dev/null +++ b/mmaudio/model/utils/features_utils.py @@ -0,0 +1,164 @@ +from typing import Literal, Optional + +import open_clip +import torch +import torch.nn as nn +import torch.nn.functional as F +from einops import rearrange +from open_clip import create_model_from_pretrained +from torchvision.transforms import Normalize + +from mmaudio.ext.autoencoder import AutoEncoderModule +from mmaudio.ext.mel_converter import MelConverter +from mmaudio.ext.synchformer import Synchformer +from mmaudio.model.utils.distributions import DiagonalGaussianDistribution + + +def patch_clip(clip_model): + # a hack to make it output last hidden states + # https://github.com/mlfoundations/open_clip/blob/fc5a37b72d705f760ebbc7915b84729816ed471f/src/open_clip/model.py#L269 + def new_encode_text(self, text, normalize: bool = False): + cast_dtype = self.transformer.get_cast_dtype() + + x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model] + + x = x + self.positional_embedding.to(cast_dtype) + x = self.transformer(x, attn_mask=self.attn_mask) + x = self.ln_final(x) # [batch_size, n_ctx, transformer.width] + return F.normalize(x, dim=-1) if normalize else x + + clip_model.encode_text = new_encode_text.__get__(clip_model) + return clip_model + + +class FeaturesUtils(nn.Module): + + def __init__( + self, + *, + tod_vae_ckpt: Optional[str] = None, + bigvgan_vocoder_ckpt: Optional[str] = None, + synchformer_ckpt: Optional[str] = None, + enable_conditions: bool = True, + mode=Literal['16k', '44k'], + need_vae_encoder: bool = True, + ): + super().__init__() + + if enable_conditions: + self.clip_model = create_model_from_pretrained('hf-hub:apple/DFN5B-CLIP-ViT-H-14-384', + return_transform=False) + self.clip_preprocess = Normalize(mean=[0.48145466, 0.4578275, 0.40821073], + std=[0.26862954, 0.26130258, 0.27577711]) + self.clip_model = patch_clip(self.clip_model) + + self.synchformer = Synchformer() + self.synchformer.load_state_dict( + torch.load(synchformer_ckpt, weights_only=True, map_location='cpu')) + + self.tokenizer = open_clip.get_tokenizer('ViT-H-14-378-quickgelu') # same as 'ViT-H-14' + else: + self.clip_model = None + self.synchformer = None + self.tokenizer = None + + if tod_vae_ckpt is not None: + self.tod = AutoEncoderModule(vae_ckpt_path=tod_vae_ckpt, + vocoder_ckpt_path=bigvgan_vocoder_ckpt, + mode=mode, + need_vae_encoder=need_vae_encoder) + else: + self.tod = None + self.mel_converter = MelConverter() + + def compile(self): + if self.clip_model 
is not None: + self.clip_model.encode_image = torch.compile(self.clip_model.encode_image) + self.clip_model.encode_text = torch.compile(self.clip_model.encode_text) + if self.synchformer is not None: + self.synchformer = torch.compile(self.synchformer) + self.decode = torch.compile(self.decode) + self.vocode = torch.compile(self.vocode) + + def train(self, mode: bool) -> None: + return super().train(False) + + @torch.inference_mode() + def encode_video_with_clip(self, x: torch.Tensor, batch_size: int = -1) -> torch.Tensor: + assert self.clip_model is not None, 'CLIP is not loaded' + # x: (B, T, C, H, W) H/W: 384 + b, t, c, h, w = x.shape + assert c == 3 and h == 384 and w == 384 + x = self.clip_preprocess(x) + x = rearrange(x, 'b t c h w -> (b t) c h w') + outputs = [] + if batch_size < 0: + batch_size = b * t + for i in range(0, b * t, batch_size): + outputs.append(self.clip_model.encode_image(x[i:i + batch_size], normalize=True)) + x = torch.cat(outputs, dim=0) + # x = self.clip_model.encode_image(x, normalize=True) + x = rearrange(x, '(b t) d -> b t d', b=b) + return x + + @torch.inference_mode() + def encode_video_with_sync(self, x: torch.Tensor, batch_size: int = -1) -> torch.Tensor: + assert self.synchformer is not None, 'Synchformer is not loaded' + # x: (B, T, C, H, W) H/W: 384 + + b, t, c, h, w = x.shape + assert c == 3 and h == 224 and w == 224 + + # partition the video + segment_size = 16 + step_size = 8 + num_segments = (t - segment_size) // step_size + 1 + segments = [] + for i in range(num_segments): + segments.append(x[:, i * step_size:i * step_size + segment_size]) + x = torch.stack(segments, dim=1) # (B, S, T, C, H, W) + + outputs = [] + if batch_size < 0: + batch_size = b + x = rearrange(x, 'b s t c h w -> (b s) 1 t c h w') + for i in range(0, b * num_segments, batch_size): + outputs.append(self.synchformer(x[i:i + batch_size])) + x = torch.cat(outputs, dim=0) + x = rearrange(x, '(b s) 1 t d -> b (s t) d', b=b) + return x + + @torch.inference_mode() + def encode_text(self, text: list[str]) -> torch.Tensor: + assert self.clip_model is not None, 'CLIP is not loaded' + assert self.tokenizer is not None, 'Tokenizer is not loaded' + # x: (B, L) + tokens = self.tokenizer(text).to(self.device) + return self.clip_model.encode_text(tokens, normalize=True) + + @torch.inference_mode() + def encode_audio(self, x) -> DiagonalGaussianDistribution: + assert self.tod is not None, 'VAE is not loaded' + # x: (B * L) + mel = self.mel_converter(x) + dist = self.tod.encode(mel) + + return dist + + @torch.inference_mode() + def vocode(self, mel: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.vocode(mel) + + @torch.inference_mode() + def decode(self, z: torch.Tensor) -> torch.Tensor: + assert self.tod is not None, 'VAE is not loaded' + return self.tod.decode(z.transpose(1, 2)) + + @property + def device(self): + return next(self.parameters()).device + + @property + def dtype(self): + return next(self.parameters()).dtype diff --git a/mmaudio/model/utils/parameter_groups.py b/mmaudio/model/utils/parameter_groups.py new file mode 100644 index 0000000000000000000000000000000000000000..89c3993083f470dfc6b18a5c90f908ea37bde12b --- /dev/null +++ b/mmaudio/model/utils/parameter_groups.py @@ -0,0 +1,72 @@ +import logging + +log = logging.getLogger() + + +def get_parameter_groups(model, cfg, print_log=False): + """ + Assign different weight decays and learning rates to different parameters. 
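+    In the current form every trainable parameter is placed in a single group (the backbone / embedding splits are commented out).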
+ Returns a parameter group which can be passed to the optimizer. + """ + weight_decay = cfg.weight_decay + # embed_weight_decay = cfg.embed_weight_decay + # backbone_lr_ratio = cfg.backbone_lr_ratio + base_lr = cfg.learning_rate + + backbone_params = [] + embed_params = [] + other_params = [] + + # embedding_names = ['summary_pos', 'query_init', 'query_emb', 'obj_pe'] + # embedding_names = [e + '.weight' for e in embedding_names] + + # inspired by detectron2 + memo = set() + for name, param in model.named_parameters(): + if not param.requires_grad: + continue + # Avoid duplicating parameters + if param in memo: + continue + memo.add(param) + + if name.startswith('module'): + name = name[7:] + + inserted = False + # if name.startswith('pixel_encoder.'): + # backbone_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as a backbone parameter.') + # else: + # for e in embedding_names: + # if name.endswith(e): + # embed_params.append(param) + # inserted = True + # if print_log: + # log.info(f'{name} counted as an embedding parameter.') + # break + + # if not inserted: + other_params.append(param) + + parameter_groups = [ + # { + # 'params': backbone_params, + # 'lr': base_lr * backbone_lr_ratio, + # 'weight_decay': weight_decay + # }, + # { + # 'params': embed_params, + # 'lr': base_lr, + # 'weight_decay': embed_weight_decay + # }, + { + 'params': other_params, + 'lr': base_lr, + 'weight_decay': weight_decay + }, + ] + + return parameter_groups diff --git a/mmaudio/model/utils/sample_utils.py b/mmaudio/model/utils/sample_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..d44cf278e0b464bc6ac7e240fcab4a23895caa2f --- /dev/null +++ b/mmaudio/model/utils/sample_utils.py @@ -0,0 +1,12 @@ +from typing import Optional + +import torch + + +def log_normal_sample(x: torch.Tensor, + generator: Optional[torch.Generator] = None, + m: float = 0.0, + s: float = 1.0) -> torch.Tensor: + bs = x.shape[0] + s = torch.randn(bs, device=x.device, generator=generator) * s + m + return torch.sigmoid(s) diff --git a/mmaudio/utils/__init__.py b/mmaudio/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/mmaudio/utils/dist_utils.py b/mmaudio/utils/dist_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..354229b5d94bd03d104a07c7f16a06df9b519bdd --- /dev/null +++ b/mmaudio/utils/dist_utils.py @@ -0,0 +1,17 @@ +import os +from logging import Logger + +from mmaudio.utils.logger import TensorboardLogger + +local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0 +world_size = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1 + + +def info_if_rank_zero(logger: Logger, msg: str): + if local_rank == 0: + logger.info(msg) + + +def string_if_rank_zero(logger: TensorboardLogger, tag: str, msg: str): + if local_rank == 0: + logger.log_string(tag, msg) diff --git a/mmaudio/utils/download_utils.py b/mmaudio/utils/download_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..1d193efdb6dd7811d866dcdfbdfc471a5a2f0592 --- /dev/null +++ b/mmaudio/utils/download_utils.py @@ -0,0 +1,84 @@ +import hashlib +import logging +from pathlib import Path + +import requests +from tqdm import tqdm + +log = logging.getLogger() + +links = [ + { + 'name': 'mmaudio_small_16k.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_small_16k.pth', + 'md5': 
'af93cde404179f58e3919ac085b8033b', + }, + { + 'name': 'mmaudio_small_44k.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_small_44k.pth', + 'md5': 'babd74c884783d13701ea2820a5f5b6d', + }, + { + 'name': 'mmaudio_medium_44k.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_medium_44k.pth', + 'md5': '5a56b6665e45a1e65ada534defa903d0', + }, + { + 'name': 'mmaudio_large_44k.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_large_44k.pth', + 'md5': 'fed96c325a6785b85ce75ae1aafd2673' + }, + { + 'name': 'mmaudio_large_44k_v2.pth', + 'url': 'https://huggingface.co/hkchengrex/MMAudio/resolve/main/weights/mmaudio_large_44k_v2.pth', + 'md5': '01ad4464f049b2d7efdaa4c1a59b8dfe' + }, + { + 'name': 'v1-16.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-16.pth', + 'md5': '69f56803f59a549a1a507c93859fd4d7' + }, + { + 'name': 'best_netG.pt', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/best_netG.pt', + 'md5': 'eeaf372a38a9c31c362120aba2dde292' + }, + { + 'name': 'v1-44.pth', + 'url': 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/v1-44.pth', + 'md5': 'fab020275fa44c6589820ce025191600' + }, + { + 'name': 'synchformer_state_dict.pth', + 'url': + 'https://github.com/hkchengrex/MMAudio/releases/download/v0.1/synchformer_state_dict.pth', + 'md5': '5b2f5594b0730f70e41e549b7c94390c' + }, +] + + +def download_model_if_needed(model_path: Path): + base_name = model_path.name + + for link in links: + if link['name'] == base_name: + target_link = link + break + else: + raise ValueError(f'No link found for {base_name}') + + model_path.parent.mkdir(parents=True, exist_ok=True) + if not model_path.exists() or hashlib.md5(open(model_path, + 'rb').read()).hexdigest() != target_link['md5']: + log.info(f'Downloading {base_name} to {model_path}...') + r = requests.get(target_link['url'], stream=True) + total_size = int(r.headers.get('content-length', 0)) + block_size = 1024 + t = tqdm(total=total_size, unit='iB', unit_scale=True) + with open(model_path, 'wb') as f: + for data in r.iter_content(block_size): + t.update(len(data)) + f.write(data) + t.close() + if total_size != 0 and t.n != total_size: + raise RuntimeError('Error while downloading %s' % base_name) diff --git a/models/.gitkeep b/models/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/optimization.py b/optimization.py new file mode 100644 index 0000000000000000000000000000000000000000..5c4fdb6393275b2e3e9cafe6a3eb8dd7e2ab4371 --- /dev/null +++ b/optimization.py @@ -0,0 +1,45 @@ +# optimization.py +# Focado apenas na otimização estável de quantização FP8. + +import torch +import logging +from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight + +# Usamos type hints com strings para evitar importações circulares +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from ltx_manager_helpers import LtxWorker + +logger = logging.getLogger(__name__) + +def can_optimize_fp8(): + """Verifica se a GPU atual suporta otimizações FP8.""" + if not torch.cuda.is_available(): + return False + + major, _ = torch.cuda.get_device_capability() + + if major >= 9: # Arquitetura Hopper + logger.info(f"GPU com arquitetura Hopper ou superior (CC {major}.x) detectada. 
Ativando quantização FP8.") + return True + + if major == 8: + device_name = torch.cuda.get_device_name(0).lower() + if "h100" in device_name or "l40" in device_name or "rtx 40" in device_name: # Arquitetura Ada Lovelace + logger.info(f"GPU com arquitetura Ada Lovelace (CC 8.9, Nome: {device_name}) detectada. Ativando quantização FP8.") + return True + + logger.warning(f"A GPU atual (CC {major}.x) não tem suporte otimizado para FP8. Pulando quantização.") + return False + +@torch.no_grad() +def optimize_ltx_worker(worker: "LtxWorker"): + """Aplica quantização FP8 ao transformador do pipeline LTX.""" + pipeline = worker.pipeline + device = worker.device + + logger.info(f"Iniciando quantização FP8 do transformador LTX no dispositivo {device}...") + quantize_(pipeline.transformer, float8_dynamic_activation_float8_weight()) + + torch.cuda.empty_cache() + logger.info(f"Quantização FP8 do LTX Worker no dispositivo {device} concluída com sucesso!") \ No newline at end of file diff --git a/prompts/anticipatory_keyframe_prompt.txt b/prompts/anticipatory_keyframe_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..562c1f06b78d88426ba74139bc58ab6f90c07bc9 --- /dev/null +++ b/prompts/anticipatory_keyframe_prompt.txt @@ -0,0 +1,29 @@ +# ROLE: AI Cinematographer and Storyboard Artist + +# GOAL: +Your task is to generate a single, descriptive prompt for an image generation model (Flux). This prompt must describe a keyframe that serves as a perfect visual transition BETWEEN a current scene and a future scene. You must see what you just did, where you are, and where you are preparing to go. + +# CRITICAL DIRECTIVES: +1. **SYNTHESIZE, DON'T DESCRIBE:** Do not simply describe the "Current Scene" or the "Future Scene". Your prompt must create a visual concept that exists *in the moment between them*. It's the "in-between" frame. + +2. **VISUAL ANCHORING:** The primary visual canvas is the "Current Base Image" (`[IMG-BASE]`). Your generated prompt should describe an evolution FROM this image. Maintain its environment and characters unless the narrative arc demands a change. + +3. **NARRATIVE FORESHADOWING:** The prompt must contain visual elements that hint at or prepare for the "Future Scene". If the future scene is "the chicken climbs the sidewalk", your prompt for the current scene ("the chicken crosses the road") might be "the chicken, halfway across the road, lifts its head and looks towards the curb of the sidewalk". + +4. **LEARN FROM THE PAST:** Analyze the "Previous Prompt" to understand the creative choices that led to the "Current Base Image". Maintain that stylistic and narrative trajectory. + +5. **REFERENCE POOL:** Use the "General Reference Images" (`[IMG-REF-X]`) to maintain the identity and style of key subjects throughout the sequence. + +# CONTEXT FOR YOUR DECISION: +- **Previous Prompt (What I thought to create the current image):** +{historico_prompt} + +- **Current Scene (Where I am now):** "{cena_atual}" +- **Future Scene (Where I am going next):** "{cena_futura}" + +# VISUAL ASSETS: +# [The "Current Base Image" will be tagged as [IMG-BASE].] +# [The "General Reference Images" will be tagged as [IMG-REF-1], [IMG-REF-2], etc.] + +# RESPONSE FORMAT: +Respond with ONLY the final, single-line prompt string for the image generator. 
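
The template above is resolved at runtime through its `{historico_prompt}`, `{cena_atual}` and `{cena_futura}` placeholders before being sent, together with the tagged images, to the multimodal model. Below is a minimal sketch of how such a template might be loaded and filled, assuming the placeholders are resolved with Python's `str.format`; the helper name and default path are illustrative, not part of the repository.

```python
# Illustrative sketch only: load the keyframe template and fill its placeholders.
from pathlib import Path

def build_anticipatory_keyframe_prompt(historico_prompt: str,
                                       cena_atual: str,
                                       cena_futura: str,
                                       template_path: str = "prompts/anticipatory_keyframe_prompt.txt") -> str:
    template = Path(template_path).read_text(encoding="utf-8")
    # str.format resolves the {historico_prompt}, {cena_atual} and {cena_futura}
    # placeholders used by the template above.
    return template.format(historico_prompt=historico_prompt,
                           cena_atual=cena_atual,
                           cena_futura=cena_futura)

# Example usage (hypothetical values):
# prompt = build_anticipatory_keyframe_prompt(
#     historico_prompt="the chicken steps off the curb onto the road",
#     cena_atual="the chicken crosses the road",
#     cena_futura="the chicken climbs the sidewalk")
```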
\ No newline at end of file diff --git a/prompts/audio_director_prompt.txt b/prompts/audio_director_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..8339fdd18943260e5b9b7b2a69abbd0aead32372 --- /dev/null +++ b/prompts/audio_director_prompt.txt @@ -0,0 +1,18 @@ +# ROLE: AI Audio Director and Sound Designer + +# GOAL: +Analyze the provided film script/storyboard. Based on the overall narrative and mood, generate two distinct prompts for audio generation: one for a background music score and one for ambient sound effects (SFX). + +# INSTRUCTIONS: +1. **Analyze the Story:** Read the "Global Idea" and the "Scene Storyboard" to understand the plot, pacing, and emotional tone of the film. +2. **Create Music Prompt:** Write a concise, descriptive prompt for a music generation model (like MusicGen). Focus on genre, mood, instruments, and tempo. Example: "An epic and adventurous orchestral score, with heroic brass fanfares and powerful percussion, cinematic fantasy." +3. **Create SFX Prompt:** Write a concise, descriptive prompt for an audio generation model (like AudioLDM2). Focus on ambient sounds and key effects that match the scenes. Example: "The sound of a gentle breeze rustling through tall savanna grass, distant animal calls, and the heavy footsteps of a large creature walking on dry earth." +4. **Output Format:** You MUST respond with a single, clean JSON object with exactly two keys: "music_prompt" and "sfx_prompt". + +# == PROVIDED CONTEXT == +- **Global Idea:** "{global_prompt}" +- **Scene Storyboard:** +{storyboard_str} + +# == YOUR TASK == +# Generate the JSON response with the two audio prompts. \ No newline at end of file diff --git a/prompts/cinematic_director_prompt.txt b/prompts/cinematic_director_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..cfbd0a8d724e79ef980e62296820a114012eac97 --- /dev/null +++ b/prompts/cinematic_director_prompt.txt @@ -0,0 +1,45 @@ +# ROLE: AI Film Director & Cinematographer + +# GOAL: +Your primary task is to act as a film director. You will analyze the full context of a scene—its past, present, and future—to make a crucial editing decision ("continuous" or "cut"). After deciding, you must write a single, rich, cinematic motion prompt to generate the video for the PRESENT act. + +# CONTEXT FOR YOUR DECISION: +You will receive a complete picture of the narrative timeline. + +- **Global Story Goal:** The main theme of the entire film. + - `{global_prompt}` + +- **Creative History:** The sequence of motion prompts you have already created. This is your memory. + - `{story_history}` + +- **The Past (Where you came from):** + - Textual Past (Ato_n-1): "{past_scene_desc}" + - Visual Past (Keyframe k_n-1): [PAST_IMAGE] + +- **The Present (Where you are now):** + - Textual Present (Ato_n): "{present_scene_desc}" + - Visual Present (Keyframe k_n): [PRESENT_IMAGE] + +- **The Future (Where you are going):** + - Textual Future (Ato_n+1): "{future_scene_desc}" + - Visual Future (Keyframe k_n+1): [FUTURE_IMAGE] + +# --- TASK 1: THE EDITING DECISION --- +Analyze the transition from the PRESENT (`k_n`) to the FUTURE (`k_n+1`). +- If there is a major, non-continuous jump (e.g., scene changes from day to night, character teleports, location is completely different), you MUST decide this is a "cut". This is a critical break in the action. +- Otherwise, if the action can flow logically from the present to the future, decide it is "continuous". 
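
The editing decision above ("continuous" vs. "cut") is returned as part of the two-key JSON object requested in the RESPONSE FORMAT section of this template. A minimal validation sketch follows, assuming the orchestrator parses the raw model reply with Python's `json` module; the helper name is an assumption and not part of the repository.

```python
# Illustrative sketch only: check the director's reply against the two-key
# JSON contract ("transition_type", "motion_prompt") before acting on it.
import json

def parse_director_decision(raw_reply: str) -> dict:
    decision = json.loads(raw_reply)
    if decision.get("transition_type") not in ("continuous", "cut"):
        raise ValueError(f"unexpected transition_type: {decision.get('transition_type')!r}")
    motion_prompt = decision.get("motion_prompt")
    if not isinstance(motion_prompt, str) or not motion_prompt.strip():
        raise ValueError("motion_prompt must be a non-empty string")
    return decision
```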
+ +# --- TASK 2: THE CINEMATIC MOTION PROMPT --- +Based on your decision, write the `motion_prompt`. The prompt MUST describe the action that moves the story from the PRESENT visual (`k_n`) towards the FUTURE visual (`k_n+1`). + +**CRITICAL PROMPT DIRECTIVES:** +1. **ALWAYS DESCRIBE MOTION:** The scene must not be static. Something must always be moving. +2. **STYLE:** Be descriptive, cinematic, and direct. Use the user's `Global Story Goal` as a stylistic guide. +3. **STRUCTURE:** In a single paragraph (under 150 words), describe the scene's motion, prioritizing in this EXACT order: + a. **Actors/Animals:** What are they doing? Where did they come from, how are they moving, where are they going? Describe actions and expressions. + b. **Objects:** How do objects interact with the actors or the environment? + c. **Camera:** How is the camera moving? (e.g., "slow pan from left to right", "dolly zoom focusing on the character's face", "dynamic tracking shot following the action"). + d. **Scenery/Environment:** Describe environmental details that add to the motion and mood (e.g., "wind rustling the leaves", "rain streaks down the window"). + +# RESPONSE FORMAT: +You MUST respond with a single, clean JSON object with two keys: "transition_type" and "motion_prompt". \ No newline at end of file diff --git a/prompts/director_composition_prompt.txt b/prompts/director_composition_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..6663bc64e0f74a4d105a9c06cb072cebd4d3ba67 --- /dev/null +++ b/prompts/director_composition_prompt.txt @@ -0,0 +1,27 @@ +# ROLE: AI Animation Director (Key Pose) + +# GOAL: +Generate a single, powerful, CLIP-style prompt to create the NEXT keyframe in a sequence. Your goal is to describe a logical and visually coherent evolution FROM the last generated image. + +# CRITICAL DIRECTIVES: +1. **ANCHOR TO THE PREVIOUS SCENE:** The last generated image, tagged as `[IMG-1]`, represents the END of the previous scene. Your new prompt MUST describe what happens IMMEDIATELY AFTER. Treat `[IMG-1]` as your primary visual and environmental canvas. + +2. **EVOLVE, DO NOT REPLACE:** Unless the "Current Scene Description" explicitly describes a major change in location or character (e.g., "cut to a new scene"), you must maintain the environment, lighting, and core subjects from `[IMG-1]`. Your prompt should describe how the characters and objects *within* that scene evolve. + +3. **POSE, NOT PANNING:** Describe the state of the subject at a specific instant, not camera movement. Focus on body language, expression, and interaction that logically follows the previous state. + +4. **USE REFERENCES FOR CONTINUITY:** Use the `[IMG-X]` tags provided to maintain the identity of characters and objects across frames. Prioritize `[IMG-1]` for environmental context. + +5. **BE A DIRECTOR:** Use strong, active verbs. Instead of "the lion is now sitting", prefer "the lion lowers its body, muscles tensing as it settles onto the dry grass". + +# CONTEXT: +- Global Story Goal: "{global_prompt}" +- Current Scene Description: "{current_scene_desc}" +- Scene History (what happened before): +{history_scene} + +# VISUAL ASSETS FOR ANALYSIS: +# [Images will be provided and tagged as [IMG-1] (Last Image/Environment), [IMG-2] (Character Ref), etc.] + +# RESPONSE FORMAT: +Respond with ONLY the final, single-line prompt string. 
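
The composition template above relies on a positional tag convention: `[IMG-1]` is the last generated keyframe (the environmental anchor) and `[IMG-2]`, `[IMG-3]`, ... are character or style references. A small sketch of how images could be paired with those tags before a multimodal call, assuming PIL images are passed alongside the text; the function name is illustrative, not part of the repository.

```python
# Illustrative sketch only: preserve the [IMG-1], [IMG-2], ... ordering
# convention expected by the composition template ([IMG-1] = last keyframe).
from PIL import Image

def tag_reference_images(last_keyframe_path: str, reference_paths: list[str]):
    paths = [last_keyframe_path, *reference_paths]
    tagged = []
    for i, path in enumerate(paths, start=1):
        tagged.append((f"[IMG-{i}]", Image.open(path)))
    # e.g. [("[IMG-1]", <last keyframe>), ("[IMG-2]", <character ref>), ...]
    return tagged
```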
\ No newline at end of file diff --git a/prompts/flux_composition_wrapper_prompt.txt b/prompts/flux_composition_wrapper_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..99cf7f230e6d2e9c19169e84097e0fb5032bf0b0 --- /dev/null +++ b/prompts/flux_composition_wrapper_prompt.txt @@ -0,0 +1 @@ +From the provided reference images, create a single, natural, and cohesive scene where: {target_prompt} \ No newline at end of file diff --git a/prompts/initial_motion_prompt.txt b/prompts/initial_motion_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..768ee3dcce5df1d1febd0cc529b93a7e1f40b1ee --- /dev/null +++ b/prompts/initial_motion_prompt.txt @@ -0,0 +1,20 @@ +# ROLE: AI Cinematographer (Initial Scene) + +# GOAL: +Create a single, concise, CLIP-style motion prompt. The prompt must describe a coherent video sequence that transitions from a STARTING image to a DESTINATION image. + +# INSTRUCTIONS: +1. **Analyze the Arc:** Understand the visual and narrative journey from the START to the DESTINATION image. +2. **Describe the Motion:** Focus on DYNAMICS (camera and subject movement). +3. **Style Guide:** Use dense, descriptive, cinematic keywords. Omit fluff like "The video shows...". Be direct. + +# CONTEXT: +- Overall Story Goal: "{user_prompt}" +- Destination Scene Description: "{destination_scene_description}" + +# SCENE ANALYSIS: +# START Image: [Image 1] +# DESTINATION Image: [Image 2] + +# RESPONSE FORMAT: +Respond with ONLY the raw prompt string. \ No newline at end of file diff --git a/prompts/keyframe_selection_prompt.txt b/prompts/keyframe_selection_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..d74ee636ead3b444c1721abb48e21d413532e91d --- /dev/null +++ b/prompts/keyframe_selection_prompt.txt @@ -0,0 +1,20 @@ +# ROLE: AI Film Editor / Photographer + +# GOAL: +You are tasked with selecting the best keyframe for each scene of a storyboard to create a coherent visual narrative. You have a "scene bank" of available images. Your selections must create a smooth and logical progression. + +# INSTRUCTIONS: +1. **Analyze the Storyboard:** Read each scene description carefully to understand the intended action and emotion. +2. **Prioritize Continuity:** For each scene, your primary goal is to find an image from the "Image Pool" that represents a logical **next step** from the previously selected scene. Avoid jarring jumps in location, lighting, or character appearance unless the storyboard explicitly calls for a "cut". +3. **Maintain Consistency:** Your choices must be consistent with the characters and style established in the "Reference Images (Story Base)". +4. **Select the Best Fit:** If multiple images could work, choose the one that best captures the specific action or mood of the current scene description. +5. **Output Format:** You MUST respond with a single, clean JSON object with one key: "selected_image_identifiers". The value should be an array of strings, where each string is the identifier of the chosen image (e.g., "IMG-3"). The order of the array must match the order of the scenes in the storyboard. The length of the array must be exactly the same as the number of scenes. + +# == PROVIDED CONTEXT == +- **Storyboard:** +{storyboard_str} + +- **Available Image Identifiers in Pool:** {image_identifiers} + +# == YOUR TASK == +# Generate the JSON response with the selected image identifiers, prioritizing a smooth visual and narrative flow from one selection to the next. 
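
Because the selection template above requires exactly one identifier per scene, drawn only from the provided pool, a downstream check is useful before the selections are used. The sketch below assumes the reply is parsed with Python's `json` module; the helper name is an assumption, not part of the repository.

```python
# Illustrative sketch only: verify the editor's JSON reply against the
# contract above: one identifier per scene, each drawn from the image pool.
import json

def parse_keyframe_selection(raw_reply: str,
                             num_scenes: int,
                             image_identifiers: list[str]) -> list[str]:
    reply = json.loads(raw_reply)
    selected = reply["selected_image_identifiers"]
    if len(selected) != num_scenes:
        raise ValueError(f"expected {num_scenes} selections, got {len(selected)}")
    unknown = [s for s in selected if s not in image_identifiers]
    if unknown:
        raise ValueError(f"identifiers not in the pool: {unknown}")
    return selected
```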
\ No newline at end of file diff --git a/prompts/sound_director_prompt.txt b/prompts/sound_director_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..1611f6d03642b3c9619806e8133f66d32c61e78b --- /dev/null +++ b/prompts/sound_director_prompt.txt @@ -0,0 +1,27 @@ +# ROLE: AI Sound Director & Foley Artist + +# GOAL: +You are the sound director for a film. Your task is to create a single, rich, and descriptive prompt for an audio generation model (like MMAudio). This prompt must describe the complete soundscape for the CURRENT scene, considering what happened before and what will happen next to ensure audio continuity. + +# CRITICAL RULES (MUST FOLLOW): +1. **NO SPEECH OR VOICES:** The final prompt must NOT include any terms related to human speech, dialogue, talking, voices, singing, or narration. The goal is to create a world of ambient sounds and specific sound effects (SFX). +2. **FOCUS ON THE PRESENT:** The audio must primarily match the CURRENT visual scene (Keyframe Kn) and its textual description (Ato_n). +3. **USE THE PAST FOR CONTINUITY:** Analyze the "Previous Audio Prompt" to understand the established soundscape. If a sound should logically continue from the previous scene, include it (e.g., "the continued sound of a gentle breeze..."). +4. **USE THE FUTURE FOR FORESHADOWING:** Analyze the FUTURE keyframe and scene description. If appropriate, introduce subtle sounds that hint at what's to come. (e.g., if the next scene is a storm, you could add "...with the faint, distant rumble of thunder in the background."). +5. **BE DESCRIPTIVE:** Use evocative language. Instead of "dog bark", use "the sharp, excited yapping of a small dog". Combine multiple elements into a cohesive soundscape. + +# CONTEXT FOR YOUR DECISION: + +- **Previous Audio Prompt (what was just heard):** +{audio_history} + +- **VISUAL PAST (Keyframe Kn-1):** [PAST_IMAGE] +- **VISUAL PRESENT (Keyframe Kn):** [PRESENT_IMAGE] +- **VISUAL FUTURE (Keyframe Kn+1):** [FUTURE_IMAGE] + +- **CURRENT Scene Description (Ato_n):** "{present_scene_desc}" +- **CURRENT Motion Prompt (what the camera is doing):** "{motion_prompt}" +- **FUTURE Scene Description (Ato_n+1):** "{future_scene_desc}" + +# RESPONSE FORMAT: +Respond with ONLY the final, single-line prompt string for the audio generator. \ No newline at end of file diff --git a/prompts/transition_decision_prompt.txt b/prompts/transition_decision_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..6607099eb2a8aa2ddd7cb7d1ed401298687f5e6d --- /dev/null +++ b/prompts/transition_decision_prompt.txt @@ -0,0 +1,27 @@ +# ROLE: AI Director of Continuity & Cinematographer + +# GOAL: +Analyze the visual continuity between a START, MIDPOINT, and DESTINATION image. Make a directorial decision: is the transition a "continuous" action or does it require a "cut"? Then, write the appropriate motion prompt. + +# INSTRUCTIONS: +1. **Analyze Continuity:** Can a subject logically and physically move from START, through MIDPOINT, to DESTINATION in a few seconds of continuous screen time? Consider changes in location, pose, and time of day. + * **Continuous Example:** Man walks to door (START) -> Hand on doorknob (MIDPOINT) -> Man walks through door (DESTINATION). + * **Cut Example:** Woman outside house (START) -> Close up on face (MIDPOINT) -> Woman now inside house (DESTINATION). +2. **Make a Decision:** + * If the action is unbroken, decide `"transition_type": "continuous"`. 
+ * If there is a jump in time, space, or logic, decide `"transition_type": "cut"`. +3. **Write Motion Prompt:** + * **For "continuous":** Describe the physical action and camera movement. Example: "Camera follows the man as he opens the door and steps inside." + * **For "cut":** Describe a cinematic transition effect. DO NOT describe character actions. Example: "A smooth cross-dissolve transition to the new scene." + +# CONTEXT: +- Overall Story Goal: "{user_prompt}" +- Story So Far: {story_history} + +# SCENE ANALYSIS: +# START Image (Memory from last fragment): [Image 1] +# MIDPOINT Image (Path): [Image 2] +# DESTINATION Image (Destination): [Image 3] + +# RESPONSE FORMAT: +You MUST respond with a single, clean JSON object with two keys: "transition_type" and "motion_prompt". \ No newline at end of file diff --git a/prompts/unified_cinematographer_prompt.txt b/prompts/unified_cinematographer_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..7d4f23924ad6e00e2afbb1d71e510a3a3dbb9262 --- /dev/null +++ b/prompts/unified_cinematographer_prompt.txt @@ -0,0 +1,47 @@ +# ROLE: AI Continuity Director & Cinematographer + +# GOAL: +Your task is twofold. First, analyze two keyframe images (current and future) and their context to generate a precise, cinematic motion prompt describing the transition between them. Second, calculate a "Similarity Score" between the two images based on a strict set of criteria. + +# --- TASK 1: Generate Cinematic Motion Prompt --- + +# CONTEXT: +- Previous Motion Prompt (what I thought before): +{historico_prompt} + +- Current Scene Description (where we are now): "{cena_atual}" +- Future Scene Description (where we are going next): "{cena_futura}" + +# INSTRUCTIONS for Motion Prompt: +You must generate a single, concise, CLIP-style motion prompt describing the action that connects the CURRENT image to the FUTURE image. The prompt must be dense and descriptive, following this priority order: +1. **People/Animals:** Focus on expression, emotion, and specific actions. +2. **Objects:** Describe their location and any interaction or movement. +3. **Camera:** Specify focus, zoom, and movement (pan, tilt, dolly, etc.). + +Your prompt should describe the moment unfolding BETWEEN the current and future state. + +# --- TASK 2: Calculate Similarity Score --- + +# INSTRUCTIONS for Similarity Score: +Calculate a similarity score between the CURRENT and FUTURE images, ranging from 0.0 (completely different) to 1.0 (very similar). + +**Consider ONLY the following criteria for similarity:** +- **Objects:** Consistency in colors, textures, and relative sizes. +- **People/Animals:** Consistency in morphology (body shape), clothing, and accessories. +- **Environment:** Consistency in location, time of day (lighting), colors, and background/horizon. + +**Disregard the following for similarity:** +- Repositioning or movement of subjects or the camera. + +**Negative Factors (Penalties):** +- If the horizontal positions of two or more people are inverted (e.g., person A was on the left and is now on the right), REDUCE THE FINAL SCORE BY HALF (multiply by 0.5). +- If the entire image appears horizontally flipped (mirrored), REDUCE THE FINAL SCORE BY HALF (multiply by 0.5). + +# VISUAL ASSETS: +# [The CURRENT keyframe image will be provided here.] +# [The FUTURE keyframe image will be provided here.] + +# --- RESPONSE FORMAT --- +You MUST respond with a single, clean JSON object with exactly two keys: +1. 
"motion_prompt": A string containing the generated cinematic prompt. +2. "similarity_score": A floating-point number between 0.0 and 1.0. \ No newline at end of file diff --git a/prompts/unified_storyboard_prompt.txt b/prompts/unified_storyboard_prompt.txt new file mode 100644 index 0000000000000000000000000000000000000000..81d80363c76b52afcb2b6d123eca26c745bfe4ef --- /dev/null +++ b/prompts/unified_storyboard_prompt.txt @@ -0,0 +1,19 @@ +# ROLE: AI Storyboard Writer + +# GOAL: +You are a scriptwriter tasked with breaking down a general idea into a sequence of exactly {num_fragments} distinct scenes or "acts". Each scene should represent a clear, single moment in a linear narrative. + +# CRITICAL RULES (MUST FOLLOW): +1. **ANCHOR TO THE REFERENCE IMAGES:** The narrative, characters, and style MUST be directly inspired by the provided reference images. The story should feel like it belongs in the same world as these images. +2. **SIMPLE, LINEAR ACTION:** Do not create a complex plot. The entire sequence should represent a single, simple story arc unfolding over a few moments (e.g., a character notices something, approaches it, and reacts). +3. **FOCUS ON "WHAT", NOT "HOW":** Each description is a scene, not a camera direction. Describe the core action or emotional beat of the moment. Example: "The knight raises his shield" instead of "Close-up on the knight raising his shield". + +# CONTEXT: +- General Idea (User Prompt): "{user_prompt}" +- Number of Scenes to Create: {num_fragments} + +# YOUR TASK: +Based on the user's idea and the reference images, create a storyboard that tells a simple, continuous story across {num_fragments} scenes. + +# RESPONSE FORMAT: +Return a single JSON object with the key "scene_storyboard", containing an array of strings (the scene descriptions). \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..b6d13765e7856bc694186bdd947b2669d7f90e6f --- /dev/null +++ b/requirements.txt @@ -0,0 +1,42 @@ +# --- Core ML & AI Frameworks --- +# Usamos versões específicas para garantir a reprodutibilidade. +# diffusers está fixado em um commit específico da branch 'main' do GitHub para estabilidade. +torch +torchao +torchvision +torchaudio +transformers +accelerate +safetensors +einops +sentencepiece + +git+https://github.com/hkchengrex/MMAudio.git@main + +# O formato 'package @ git+...' é o padrão moderno para dependências do Git. 
+git+https://github.com/huggingface/diffusers.git@main + +# --- User Interface --- +#Gradio para uma que contém 'mount_app' +gradio>=5.23.1 +gradio[oauth] +fastapi +uvicorn[standard] +pydantic +soundfile + +# --- Data Handling & I/O --- +Pillow +numpy +PyYAML + +# --- Video & Image Processing --- +opencv-python +imageio +imageio-ffmpeg +av + +# --- Hugging Face Hub --- +huggingface_hub + +google-generativeai \ No newline at end of file diff --git a/tools/BEN2.py b/tools/BEN2.py new file mode 100644 index 0000000000000000000000000000000000000000..e22d32d51b0fec7bcd93da71748cbce25dac666d --- /dev/null +++ b/tools/BEN2.py @@ -0,0 +1,1359 @@ +# Copyright (c) 2025 Prama LLC +# SPDX-License-Identifier: MIT + +import math +import os +import random +import subprocess +import tempfile +import time + +import cv2 +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as checkpoint +from einops import rearrange +from PIL import Image, ImageOps +from timm.models.layers import DropPath, to_2tuple, trunc_normal_ +from torchvision import transforms + + +def set_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + + +# set_random_seed(9) + +torch.set_float32_matmul_precision('highest') + + +class Mlp(nn.Module): + """ Multilayer perceptron.""" + + def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +def window_partition(x, window_size): + """ + Args: + x: (B, H, W, C) + window_size (int): window size + Returns: + windows: (num_windows*B, window_size, window_size, C) + """ + B, H, W, C = x.shape + x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) + windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) + return windows + + +def window_reverse(windows, window_size, H, W): + """ + Args: + windows: (num_windows*B, window_size, window_size, C) + window_size (int): Window size + H (int): Height of image + W (int): Width of image + Returns: + x: (B, H, W, C) + """ + B = int(windows.shape[0] / (H * W / window_size / window_size)) + x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) + x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) + return x + + +class WindowAttention(nn.Module): + """ Window based multi-head self attention (W-MSA) module with relative position bias. + It supports both of shifted and non-shifted window. + Args: + dim (int): Number of input channels. + window_size (tuple[int]): The height and width of the window. + num_heads (int): Number of attention heads. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set + attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 + proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0 + """ + + def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.): + + super().__init__() + self.dim = dim + self.window_size = window_size # Wh, Ww + self.num_heads = num_heads + head_dim = dim // num_heads + self.scale = qk_scale or head_dim ** -0.5 + + # define a parameter table of relative position bias + self.relative_position_bias_table = nn.Parameter( + torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)) # 2*Wh-1 * 2*Ww-1, nH + + # get pair-wise relative position index for each token inside the window + coords_h = torch.arange(self.window_size[0]) + coords_w = torch.arange(self.window_size[1]) + coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww + coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww + relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww + relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 + relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 + relative_coords[:, :, 1] += self.window_size[1] - 1 + relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 + relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww + self.register_buffer("relative_position_index", relative_position_index) + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + trunc_normal_(self.relative_position_bias_table, std=.02) + self.softmax = nn.Softmax(dim=-1) + + def forward(self, x, mask=None): + """ Forward function. + Args: + x: input features with shape of (num_windows*B, N, C) + mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None + """ + B_, N, C = x.shape + qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) + + q = q * self.scale + attn = (q @ k.transpose(-2, -1)) + + relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view( + self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1) # Wh*Ww,Wh*Ww,nH + relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww + attn = attn + relative_position_bias.unsqueeze(0) + + if mask is not None: + nW = mask.shape[0] + attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) + attn = attn.view(-1, self.num_heads, N, N) + attn = self.softmax(attn) + else: + attn = self.softmax(attn) + + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B_, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class SwinTransformerBlock(nn.Module): + """ Swin Transformer Block. + Args: + dim (int): Number of input channels. + num_heads (int): Number of attention heads. + window_size (int): Window size. + shift_size (int): Shift size for SW-MSA. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float, optional): Stochastic depth rate. Default: 0.0 + act_layer (nn.Module, optional): Activation layer. 
Default: nn.GELU + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, num_heads, window_size=7, shift_size=0, + mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0., + act_layer=nn.GELU, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.num_heads = num_heads + self.window_size = window_size + self.shift_size = shift_size + self.mlp_ratio = mlp_ratio + assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" + + self.norm1 = norm_layer(dim) + self.attn = WindowAttention( + dim, window_size=to_2tuple(self.window_size), num_heads=num_heads, + qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop) + + self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop) + + self.H = None + self.W = None + + def forward(self, x, mask_matrix): + """ Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. + mask_matrix: Attention mask for cyclic shift. + """ + B, L, C = x.shape + H, W = self.H, self.W + assert L == H * W, "input feature has wrong size" + + shortcut = x + x = self.norm1(x) + x = x.view(B, H, W, C) + + # pad feature maps to multiples of window size + pad_l = pad_t = 0 + pad_r = (self.window_size - W % self.window_size) % self.window_size + pad_b = (self.window_size - H % self.window_size) % self.window_size + x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) + _, Hp, Wp, _ = x.shape + + # cyclic shift + if self.shift_size > 0: + shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) + attn_mask = mask_matrix + else: + shifted_x = x + attn_mask = None + + # partition windows + x_windows = window_partition(shifted_x, self.window_size) # nW*B, window_size, window_size, C + x_windows = x_windows.view(-1, self.window_size * self.window_size, C) # nW*B, window_size*window_size, C + + # W-MSA/SW-MSA + attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C + + # merge windows + attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) + shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C + + # reverse cyclic shift + if self.shift_size > 0: + x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) + else: + x = shifted_x + + if pad_r > 0 or pad_b > 0: + x = x[:, :H, :W, :].contiguous() + + x = x.view(B, H * W, C) + + # FFN + x = shortcut + self.drop_path(x) + x = x + self.drop_path(self.mlp(self.norm2(x))) + + return x + + +class PatchMerging(nn.Module): + """ Patch Merging Layer + Args: + dim (int): Number of input channels. + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + """ + + def __init__(self, dim, norm_layer=nn.LayerNorm): + super().__init__() + self.dim = dim + self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) + self.norm = norm_layer(4 * dim) + + def forward(self, x, H, W): + """ Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
+ """ + B, L, C = x.shape + assert L == H * W, "input feature has wrong size" + + x = x.view(B, H, W, C) + + # padding + pad_input = (H % 2 == 1) or (W % 2 == 1) + if pad_input: + x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) + + x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C + x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C + x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C + x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C + x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C + x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C + + x = self.norm(x) + x = self.reduction(x) + + return x + + +class BasicLayer(nn.Module): + """ A basic Swin Transformer layer for one stage. + Args: + dim (int): Number of feature channels + depth (int): Depths of this stage. + num_heads (int): Number of attention head. + window_size (int): Local window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. + drop (float, optional): Dropout rate. Default: 0.0 + attn_drop (float, optional): Attention dropout rate. Default: 0.0 + drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 + norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm + downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, + dim, + depth, + num_heads, + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + norm_layer=nn.LayerNorm, + downsample=None, + use_checkpoint=False): + super().__init__() + self.window_size = window_size + self.shift_size = window_size // 2 + self.depth = depth + self.use_checkpoint = use_checkpoint + + # build blocks + self.blocks = nn.ModuleList([ + SwinTransformerBlock( + dim=dim, + num_heads=num_heads, + window_size=window_size, + shift_size=0 if (i % 2 == 0) else window_size // 2, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop, + attn_drop=attn_drop, + drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, + norm_layer=norm_layer) + for i in range(depth)]) + + # patch merging layer + if downsample is not None: + self.downsample = downsample(dim=dim, norm_layer=norm_layer) + else: + self.downsample = None + + def forward(self, x, H, W): + """ Forward function. + Args: + x: Input feature, tensor size (B, H*W, C). + H, W: Spatial resolution of the input feature. 
+ """ + + # calculate attention mask for SW-MSA + Hp = int(np.ceil(H / self.window_size)) * self.window_size + Wp = int(np.ceil(W / self.window_size)) * self.window_size + img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 + h_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + w_slices = (slice(0, -self.window_size), + slice(-self.window_size, -self.shift_size), + slice(-self.shift_size, None)) + cnt = 0 + for h in h_slices: + for w in w_slices: + img_mask[:, h, w, :] = cnt + cnt += 1 + + mask_windows = window_partition(img_mask, self.window_size) # nW, window_size, window_size, 1 + mask_windows = mask_windows.view(-1, self.window_size * self.window_size) + attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) + attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) + + for blk in self.blocks: + blk.H, blk.W = H, W + if self.use_checkpoint: + x = checkpoint.checkpoint(blk, x, attn_mask) + else: + x = blk(x, attn_mask) + if self.downsample is not None: + x_down = self.downsample(x, H, W) + Wh, Ww = (H + 1) // 2, (W + 1) // 2 + return x, H, W, x_down, Wh, Ww + else: + return x, H, W, x, H, W + + +class PatchEmbed(nn.Module): + """ Image to Patch Embedding + Args: + patch_size (int): Patch token size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + norm_layer (nn.Module, optional): Normalization layer. Default: None + """ + + def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): + super().__init__() + patch_size = to_2tuple(patch_size) + self.patch_size = patch_size + + self.in_chans = in_chans + self.embed_dim = embed_dim + + self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) + if norm_layer is not None: + self.norm = norm_layer(embed_dim) + else: + self.norm = None + + def forward(self, x): + """Forward function.""" + # padding + _, _, H, W = x.size() + if W % self.patch_size[1] != 0: + x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) + if H % self.patch_size[0] != 0: + x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) + + x = self.proj(x) # B C Wh Ww + if self.norm is not None: + Wh, Ww = x.size(2), x.size(3) + x = x.flatten(2).transpose(1, 2) + x = self.norm(x) + x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) + + return x + + +class SwinTransformer(nn.Module): + """ Swin Transformer backbone. + A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - + https://arxiv.org/pdf/2103.14030 + Args: + pretrain_img_size (int): Input image size for training the pretrained model, + used in absolute postion embedding. Default 224. + patch_size (int | tuple(int)): Patch size. Default: 4. + in_chans (int): Number of input image channels. Default: 3. + embed_dim (int): Number of linear projection output channels. Default: 96. + depths (tuple[int]): Depths of each Swin Transformer stage. + num_heads (tuple[int]): Number of attention head of each stage. + window_size (int): Window size. Default: 7. + mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. + qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True + qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. + drop_rate (float): Dropout rate. + attn_drop_rate (float): Attention dropout rate. 
Default: 0. + drop_path_rate (float): Stochastic depth rate. Default: 0.2. + norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. + ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. + patch_norm (bool): If True, add normalization after patch embedding. Default: True. + out_indices (Sequence[int]): Output from which stages. + frozen_stages (int): Stages to be frozen (stop grad and set eval mode). + -1 means not freezing any parameters. + use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. + """ + + def __init__(self, + pretrain_img_size=224, + patch_size=4, + in_chans=3, + embed_dim=96, + depths=[2, 2, 6, 2], + num_heads=[3, 6, 12, 24], + window_size=7, + mlp_ratio=4., + qkv_bias=True, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.2, + norm_layer=nn.LayerNorm, + ape=False, + patch_norm=True, + out_indices=(0, 1, 2, 3), + frozen_stages=-1, + use_checkpoint=False): + super().__init__() + + self.pretrain_img_size = pretrain_img_size + self.num_layers = len(depths) + self.embed_dim = embed_dim + self.ape = ape + self.patch_norm = patch_norm + self.out_indices = out_indices + self.frozen_stages = frozen_stages + + # split image into non-overlapping patches + self.patch_embed = PatchEmbed( + patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim, + norm_layer=norm_layer if self.patch_norm else None) + + # absolute position embedding + if self.ape: + pretrain_img_size = to_2tuple(pretrain_img_size) + patch_size = to_2tuple(patch_size) + patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]] + + self.absolute_pos_embed = nn.Parameter( + torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])) + trunc_normal_(self.absolute_pos_embed, std=.02) + + self.pos_drop = nn.Dropout(p=drop_rate) + + # stochastic depth + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] # stochastic depth decay rule + + # build layers + self.layers = nn.ModuleList() + for i_layer in range(self.num_layers): + layer = BasicLayer( + dim=int(embed_dim * 2 ** i_layer), + depth=depths[i_layer], + num_heads=num_heads[i_layer], + window_size=window_size, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], + norm_layer=norm_layer, + downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, + use_checkpoint=use_checkpoint) + self.layers.append(layer) + + num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] + self.num_features = num_features + + # add a norm layer for each output + for i_layer in out_indices: + layer = norm_layer(num_features[i_layer]) + layer_name = f'norm{i_layer}' + self.add_module(layer_name, layer) + + self._freeze_stages() + + def _freeze_stages(self): + if self.frozen_stages >= 0: + self.patch_embed.eval() + for param in self.patch_embed.parameters(): + param.requires_grad = False + + if self.frozen_stages >= 1 and self.ape: + self.absolute_pos_embed.requires_grad = False + + if self.frozen_stages >= 2: + self.pos_drop.eval() + for i in range(0, self.frozen_stages - 1): + m = self.layers[i] + m.eval() + for param in m.parameters(): + param.requires_grad = False + + def forward(self, x): + + x = self.patch_embed(x) + + Wh, Ww = x.size(2), x.size(3) + if self.ape: + # interpolate the position embedding to the corresponding size + absolute_pos_embed = 
F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic') + x = (x + absolute_pos_embed) # B Wh*Ww C + + outs = [x.contiguous()] + x = x.flatten(2).transpose(1, 2) + x = self.pos_drop(x) + + for i in range(self.num_layers): + layer = self.layers[i] + x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) + + if i in self.out_indices: + norm_layer = getattr(self, f'norm{i}') + x_out = norm_layer(x_out) + + out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() + outs.append(out) + + return tuple(outs) + + +def get_activation_fn(activation): + """Return an activation function given a string""" + if activation == "gelu": + return F.gelu + + raise RuntimeError(F"activation should be gelu, not {activation}.") + + +def make_cbr(in_dim, out_dim): + return nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1), nn.InstanceNorm2d(out_dim), nn.GELU()) + + +def make_cbg(in_dim, out_dim): + return nn.Sequential(nn.Conv2d(in_dim, out_dim, kernel_size=3, padding=1), nn.InstanceNorm2d(out_dim), nn.GELU()) + + +def rescale_to(x, scale_factor: float = 2, interpolation='nearest'): + return F.interpolate(x, scale_factor=scale_factor, mode=interpolation) + + +def resize_as(x, y, interpolation='bilinear'): + return F.interpolate(x, size=y.shape[-2:], mode=interpolation) + + +def image2patches(x): + """b c (hg h) (wg w) -> (hg wg b) c h w""" + x = rearrange(x, 'b c (hg h) (wg w) -> (hg wg b) c h w', hg=2, wg=2) + return x + + +def patches2image(x): + """(hg wg b) c h w -> b c (hg h) (wg w)""" + x = rearrange(x, '(hg wg b) c h w -> b c (hg h) (wg w)', hg=2, wg=2) + return x + + +class PositionEmbeddingSine: + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + self.dim_t = torch.arange(0, self.num_pos_feats, dtype=torch.float32) + + def __call__(self, b, h, w): + device = self.dim_t.device + mask = torch.zeros([b, h, w], dtype=torch.bool, device=device) + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(dim=1, dtype=torch.float32) + x_embed = not_mask.cumsum(dim=2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = self.temperature ** (2 * (self.dim_t.to(device) // 2) / self.num_pos_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + + return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + + +class PositionEmbeddingSine: + def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None): + super().__init__() + self.num_pos_feats = num_pos_feats + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError("normalize should be True if scale is passed") + if scale is None: + scale = 2 * math.pi + self.scale = scale + self.dim_t = torch.arange(0, self.num_pos_feats, dtype=torch.float32) + + def __call__(self, b, h, w): + device = self.dim_t.device + mask 
= torch.zeros([b, h, w], dtype=torch.bool, device=device) + assert mask is not None + not_mask = ~mask + y_embed = not_mask.cumsum(dim=1, dtype=torch.float32) + x_embed = not_mask.cumsum(dim=2, dtype=torch.float32) + if self.normalize: + eps = 1e-6 + y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale + x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale + + dim_t = self.temperature ** (2 * (self.dim_t.to(device) // 2) / self.num_pos_feats) + pos_x = x_embed[:, :, :, None] / dim_t + pos_y = y_embed[:, :, :, None] / dim_t + + pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) + pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) + + return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) + + +class MCLM(nn.Module): + def __init__(self, d_model, num_heads, pool_ratios=[1, 4, 8]): + super(MCLM, self).__init__() + self.attention = nn.ModuleList([ + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1) + ]) + + self.linear1 = nn.Linear(d_model, d_model * 2) + self.linear2 = nn.Linear(d_model * 2, d_model) + self.linear3 = nn.Linear(d_model, d_model * 2) + self.linear4 = nn.Linear(d_model * 2, d_model) + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(0.1) + self.dropout1 = nn.Dropout(0.1) + self.dropout2 = nn.Dropout(0.1) + self.activation = get_activation_fn('gelu') + self.pool_ratios = pool_ratios + self.p_poses = [] + self.g_pos = None + self.positional_encoding = PositionEmbeddingSine(num_pos_feats=d_model // 2, normalize=True) + + def forward(self, l, g): + """ + l: 4,c,h,w + g: 1,c,h,w + """ + self.p_poses = [] + self.g_pos = None + b, c, h, w = l.size() + # 4,c,h,w -> 1,c,2h,2w + concated_locs = rearrange(l, '(hg wg b) c h w -> b c (hg h) (wg w)', hg=2, wg=2) + + pools = [] + for pool_ratio in self.pool_ratios: + # b,c,h,w + tgt_hw = (round(h / pool_ratio), round(w / pool_ratio)) + pool = F.adaptive_avg_pool2d(concated_locs, tgt_hw) + pools.append(rearrange(pool, 'b c h w -> (h w) b c')) + if self.g_pos is None: + pos_emb = self.positional_encoding(pool.shape[0], pool.shape[2], pool.shape[3]) + pos_emb = rearrange(pos_emb, 'b c h w -> (h w) b c') + self.p_poses.append(pos_emb) + pools = torch.cat(pools, 0) + if self.g_pos is None: + self.p_poses = torch.cat(self.p_poses, dim=0) + pos_emb = self.positional_encoding(g.shape[0], g.shape[2], g.shape[3]) + self.g_pos = rearrange(pos_emb, 'b c h w -> (h w) b c') + + device = pools.device + self.p_poses = self.p_poses.to(device) + self.g_pos = self.g_pos.to(device) + + # attention between glb (q) & multisensory concated-locs (k,v) + g_hw_b_c = rearrange(g, 'b c h w -> (h w) b c') + + g_hw_b_c = g_hw_b_c + self.dropout1(self.attention[0](g_hw_b_c + self.g_pos, pools + self.p_poses, pools)[0]) + g_hw_b_c = self.norm1(g_hw_b_c) + g_hw_b_c = g_hw_b_c + self.dropout2(self.linear2(self.dropout(self.activation(self.linear1(g_hw_b_c)).clone()))) + g_hw_b_c = self.norm2(g_hw_b_c) + + # attention between origin locs (q) & freashed glb (k,v) + l_hw_b_c = rearrange(l, "b c h w -> (h w) b c") + _g_hw_b_c = rearrange(g_hw_b_c, '(h w) b c -> h w b c', h=h, w=w) + _g_hw_b_c = rearrange(_g_hw_b_c, "(ng h) (nw w) b c -> (h w) (ng nw b) c", ng=2, nw=2) + 
outputs_re = [] + for i, (_l, _g) in enumerate(zip(l_hw_b_c.chunk(4, dim=1), _g_hw_b_c.chunk(4, dim=1))): + outputs_re.append(self.attention[i + 1](_l, _g, _g)[0]) # (h w) 1 c + outputs_re = torch.cat(outputs_re, 1) # (h w) 4 c + + l_hw_b_c = l_hw_b_c + self.dropout1(outputs_re) + l_hw_b_c = self.norm1(l_hw_b_c) + l_hw_b_c = l_hw_b_c + self.dropout2(self.linear4(self.dropout(self.activation(self.linear3(l_hw_b_c)).clone()))) + l_hw_b_c = self.norm2(l_hw_b_c) + + l = torch.cat((l_hw_b_c, g_hw_b_c), 1) # hw,b(5),c + return rearrange(l, "(h w) b c -> b c h w", h=h, w=w) ## (5,c,h*w) + + +class MCRM(nn.Module): + def __init__(self, d_model, num_heads, pool_ratios=[4, 8, 16], h=None): + super(MCRM, self).__init__() + self.attention = nn.ModuleList([ + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1), + nn.MultiheadAttention(d_model, num_heads, dropout=0.1) + ]) + self.linear3 = nn.Linear(d_model, d_model * 2) + self.linear4 = nn.Linear(d_model * 2, d_model) + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout = nn.Dropout(0.1) + self.dropout1 = nn.Dropout(0.1) + self.dropout2 = nn.Dropout(0.1) + self.sigmoid = nn.Sigmoid() + self.activation = get_activation_fn('gelu') + self.sal_conv = nn.Conv2d(d_model, 1, 1) + self.pool_ratios = pool_ratios + + def forward(self, x): + device = x.device + b, c, h, w = x.size() + loc, glb = x.split([4, 1], dim=0) # 4,c,h,w; 1,c,h,w + + patched_glb = rearrange(glb, 'b c (hg h) (wg w) -> (hg wg b) c h w', hg=2, wg=2) + + token_attention_map = self.sigmoid(self.sal_conv(glb)) + token_attention_map = F.interpolate(token_attention_map, size=patches2image(loc).shape[-2:], mode='nearest') + loc = loc * rearrange(token_attention_map, 'b c (hg h) (wg w) -> (hg wg b) c h w', hg=2, wg=2) + + pools = [] + for pool_ratio in self.pool_ratios: + tgt_hw = (round(h / pool_ratio), round(w / pool_ratio)) + pool = F.adaptive_avg_pool2d(patched_glb, tgt_hw) + pools.append(rearrange(pool, 'nl c h w -> nl c (h w)')) # nl(4),c,hw + + pools = rearrange(torch.cat(pools, 2), "nl c nphw -> nl nphw 1 c") + loc_ = rearrange(loc, 'nl c h w -> nl (h w) 1 c') + + outputs = [] + for i, q in enumerate(loc_.unbind(dim=0)): # traverse all local patches + v = pools[i] + k = v + outputs.append(self.attention[i](q, k, v)[0]) + + outputs = torch.cat(outputs, 1) + src = loc.view(4, c, -1).permute(2, 0, 1) + self.dropout1(outputs) + src = self.norm1(src) + src = src + self.dropout2(self.linear4(self.dropout(self.activation(self.linear3(src)).clone()))) + src = self.norm2(src) + src = src.permute(1, 2, 0).reshape(4, c, h, w) # freshed loc + glb = glb + F.interpolate(patches2image(src), size=glb.shape[-2:], mode='nearest') # freshed glb + + return torch.cat((src, glb), 0), token_attention_map + + +class BEN_Base(nn.Module): + def __init__(self): + super().__init__() + + self.backbone = SwinTransformer(embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12) + emb_dim = 128 + self.sideout5 = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + self.sideout4 = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + self.sideout3 = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + self.sideout2 = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + self.sideout1 = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + + self.output5 = make_cbr(1024, emb_dim) + self.output4 = 
make_cbr(512, emb_dim) + self.output3 = make_cbr(256, emb_dim) + self.output2 = make_cbr(128, emb_dim) + self.output1 = make_cbr(128, emb_dim) + + self.multifieldcrossatt = MCLM(emb_dim, 1, [1, 4, 8]) + self.conv1 = make_cbr(emb_dim, emb_dim) + self.conv2 = make_cbr(emb_dim, emb_dim) + self.conv3 = make_cbr(emb_dim, emb_dim) + self.conv4 = make_cbr(emb_dim, emb_dim) + self.dec_blk1 = MCRM(emb_dim, 1, [2, 4, 8]) + self.dec_blk2 = MCRM(emb_dim, 1, [2, 4, 8]) + self.dec_blk3 = MCRM(emb_dim, 1, [2, 4, 8]) + self.dec_blk4 = MCRM(emb_dim, 1, [2, 4, 8]) + + self.insmask_head = nn.Sequential( + nn.Conv2d(emb_dim, 384, kernel_size=3, padding=1), + nn.InstanceNorm2d(384), + nn.GELU(), + nn.Conv2d(384, 384, kernel_size=3, padding=1), + nn.InstanceNorm2d(384), + nn.GELU(), + nn.Conv2d(384, emb_dim, kernel_size=3, padding=1) + ) + + self.shallow = nn.Sequential(nn.Conv2d(3, emb_dim, kernel_size=3, padding=1)) + self.upsample1 = make_cbg(emb_dim, emb_dim) + self.upsample2 = make_cbg(emb_dim, emb_dim) + self.output = nn.Sequential(nn.Conv2d(emb_dim, 1, kernel_size=3, padding=1)) + + for m in self.modules(): + if isinstance(m, nn.GELU) or isinstance(m, nn.Dropout): + m.inplace = True + + @torch.inference_mode() + @torch.autocast(device_type="cuda", dtype=torch.float16) + def forward(self, x): + real_batch = x.size(0) + + shallow_batch = self.shallow(x) + glb_batch = rescale_to(x, scale_factor=0.5, interpolation='bilinear') + + final_input = None + for i in range(real_batch): + start = i * 4 + end = (i + 1) * 4 + loc_batch = image2patches(x[i, :, :, :].unsqueeze(dim=0)) + input_ = torch.cat((loc_batch, glb_batch[i, :, :, :].unsqueeze(dim=0)), dim=0) + + if final_input == None: + final_input = input_ + else: + final_input = torch.cat((final_input, input_), dim=0) + + features = self.backbone(final_input) + outputs = [] + + for i in range(real_batch): + start = i * 5 + end = (i + 1) * 5 + + f4 = features[4][start:end, :, :, :] # shape: [5, C, H, W] + f3 = features[3][start:end, :, :, :] + f2 = features[2][start:end, :, :, :] + f1 = features[1][start:end, :, :, :] + f0 = features[0][start:end, :, :, :] + e5 = self.output5(f4) + e4 = self.output4(f3) + e3 = self.output3(f2) + e2 = self.output2(f1) + e1 = self.output1(f0) + loc_e5, glb_e5 = e5.split([4, 1], dim=0) + e5 = self.multifieldcrossatt(loc_e5, glb_e5) # (4,128,16,16) + + e4, tokenattmap4 = self.dec_blk4(e4 + resize_as(e5, e4)) + e4 = self.conv4(e4) + e3, tokenattmap3 = self.dec_blk3(e3 + resize_as(e4, e3)) + e3 = self.conv3(e3) + e2, tokenattmap2 = self.dec_blk2(e2 + resize_as(e3, e2)) + e2 = self.conv2(e2) + e1, tokenattmap1 = self.dec_blk1(e1 + resize_as(e2, e1)) + e1 = self.conv1(e1) + + loc_e1, glb_e1 = e1.split([4, 1], dim=0) + + output1_cat = patches2image(loc_e1) # (1,128,256,256) + + # add glb feat in + output1_cat = output1_cat + resize_as(glb_e1, output1_cat) + # merge + final_output = self.insmask_head(output1_cat) # (1,128,256,256) + # shallow feature merge + shallow = shallow_batch[i, :, :, :].unsqueeze(dim=0) + final_output = final_output + resize_as(shallow, final_output) + final_output = self.upsample1(rescale_to(final_output)) + final_output = rescale_to(final_output + resize_as(shallow, final_output)) + final_output = self.upsample2(final_output) + final_output = self.output(final_output) + mask = final_output.sigmoid() + outputs.append(mask) + + return torch.cat(outputs, dim=0) + + def loadcheckpoints(self, model_path): + model_dict = torch.load(model_path, map_location="cpu", weights_only=True) + 
        self.load_state_dict(model_dict['model_state_dict'], strict=True)
+        del model_dict  # free the checkpoint dict once the weights are loaded
+
+    def inference(self, image, refine_foreground=False):
+
+        if isinstance(image, Image.Image):
+            image, h, w, original_image = rgb_loader_refiner(image)
+            if torch.cuda.is_available():
+                img_tensor = img_transform(image).unsqueeze(0).to(next(self.parameters()).device)
+            else:
+                img_tensor = img_transform32(image).unsqueeze(0).to(next(self.parameters()).device)
+
+            with torch.no_grad():
+                res = self.forward(img_tensor)
+
+            # Show results
+            if refine_foreground:
+                pred_pil = transforms.ToPILImage()(res.squeeze())
+                image_masked = refine_foreground_process(original_image, pred_pil)
+                image_masked.putalpha(pred_pil.resize(original_image.size))
+                return image_masked
+            else:
+                alpha = postprocess_image(res, im_size=[w, h])
+                pred_pil = transforms.ToPILImage()(alpha)
+                mask = pred_pil.resize(original_image.size)
+                original_image.putalpha(mask)
+
+                # Composite the cut-out subject onto a white background
+                white_background = Image.new('RGB', original_image.size, (255, 255, 255))
+                white_background.paste(original_image, mask=original_image.split()[3])
+                original_image = white_background
+
+                return original_image
+
+        else:
+            foregrounds = []
+            for frame in image:  # `image` is a list of PIL images here
+                frame_rgb, h, w, original_image = rgb_loader_refiner(frame)
+                if torch.cuda.is_available():
+                    img_tensor = img_transform(frame_rgb).unsqueeze(0).to(next(self.parameters()).device)
+                else:
+                    img_tensor = img_transform32(frame_rgb).unsqueeze(0).to(next(self.parameters()).device)
+
+                with torch.no_grad():
+                    res = self.forward(img_tensor)
+
+                if refine_foreground:
+                    pred_pil = transforms.ToPILImage()(res.squeeze())
+                    image_masked = refine_foreground_process(original_image, pred_pil)
+                    image_masked.putalpha(pred_pil.resize(original_image.size))
+                    foregrounds.append(image_masked)
+                else:
+                    alpha = postprocess_image(res, im_size=[w, h])
+                    pred_pil = transforms.ToPILImage()(alpha)
+                    mask = pred_pil.resize(original_image.size)
+                    original_image.putalpha(mask)
+                    foregrounds.append(original_image)
+
+            return foregrounds
+
+    def segment_video(self, video_path, output_path="./", fps=0, refine_foreground=False, batch=1,
+                      print_frames_processed=True, webm=False, rgb_value=(0, 255, 0)):
+
+        """
+        Segments the given video to extract the foreground (with alpha) from each frame
+        and saves the result as either a WebM video (with alpha channel) or MP4 (with a
+        color background).
+
+        Args:
+            video_path (str):
+                Path to the input video file.
+
+            output_path (str, optional):
+                Directory (or full path) where the output video and/or files will be saved.
+                Defaults to "./".
+
+            fps (int, optional):
+                The frames per second (FPS) to use for the output video. If 0 (default), the
+                original FPS of the input video is used. Otherwise, overrides it.
+
+            refine_foreground (bool, optional):
+                Whether to run an additional "refine foreground" process on each frame.
+                Defaults to False.
+
+            batch (int, optional):
+                Number of frames to process at once (inference batch size). Large batch sizes
+                may require more GPU memory. Defaults to 1.
+
+            print_frames_processed (bool, optional):
+                If True (default), prints progress (how many frames have been processed) to
+                the console.
+
+            webm (bool, optional):
+                If True, exports a WebM video with an alpha channel (VP9 / yuva420p).
+                If False (the default), exports an MP4 video composited over a solid color background.
+ + rgb_value (tuple, optional): + The RGB background color (e.g., green screen) used to composite frames when + saving to MP4. Defaults to (0, 255, 0). + + Returns: + None. Writes the output video(s) to disk in the specified format. + """ + + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + raise IOError(f"Cannot open video: {video_path}") + + original_fps = cap.get(cv2.CAP_PROP_FPS) + original_fps = 30 if original_fps == 0 else original_fps + fps = original_fps if fps == 0 else fps + + ret, first_frame = cap.read() + if not ret: + raise ValueError("No frames found in the video.") + height, width = first_frame.shape[:2] + cap.set(cv2.CAP_PROP_POS_FRAMES, 0) + + foregrounds = [] + frame_idx = 0 + processed_count = 0 + batch_frames = [] + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + while True: + ret, frame = cap.read() + if not ret: + if batch_frames: + batch_results = self.inference(batch_frames, refine_foreground) + if isinstance(batch_results, Image.Image): + foregrounds.append(batch_results) + else: + foregrounds.extend(batch_results) + if print_frames_processed: + print(f"Processed frames {frame_idx - len(batch_frames) + 1} to {frame_idx} of {total_frames}") + break + + # Process every frame instead of using intervals + frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) + pil_frame = Image.fromarray(frame_rgb) + batch_frames.append(pil_frame) + + if len(batch_frames) == batch: + batch_results = self.inference(batch_frames, refine_foreground) + if isinstance(batch_results, Image.Image): + foregrounds.append(batch_results) + else: + foregrounds.extend(batch_results) + if print_frames_processed: + print(f"Processed frames {frame_idx - batch + 1} to {frame_idx} of {total_frames}") + batch_frames = [] + processed_count += batch + + frame_idx += 1 + + if webm: + alpha_webm_path = os.path.join(output_path, "foreground.webm") + pil_images_to_webm_alpha(foregrounds, alpha_webm_path, fps=original_fps) + + else: + cap.release() + fg_output = os.path.join(output_path, 'foreground.mp4') + + pil_images_to_mp4(foregrounds, fg_output, fps=original_fps, rgb_value=rgb_value) + cv2.destroyAllWindows() + + try: + fg_audio_output = os.path.join(output_path, 'foreground_output_with_audio.mp4') + add_audio_to_video(fg_output, video_path, fg_audio_output) + except Exception as e: + print("No audio found in the original video") + print(e) + + +def rgb_loader_refiner(original_image): + h, w = original_image.size + + image = original_image + # Convert to RGB if necessary + if image.mode != 'RGB': + image = image.convert('RGB') + + # Resize the image + image = image.resize((1024, 1024), resample=Image.LANCZOS) + + return image.convert('RGB'), h, w, original_image + + +# Define the image transformation +img_transform = transforms.Compose([ + transforms.ToTensor(), + transforms.ConvertImageDtype(torch.float16), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +]) + +img_transform32 = transforms.Compose([ + transforms.ToTensor(), + transforms.ConvertImageDtype(torch.float32), + transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) +]) + + +def pil_images_to_mp4(images, output_path, fps=24, rgb_value=(0, 255, 0)): + """ + Converts an array of PIL images to an MP4 video. 
+ + Args: + images: List of PIL images + output_path: Path to save the MP4 file + fps: Frames per second (default: 24) + rgb_value: Background RGB color tuple (default: green (0, 255, 0)) + """ + if not images: + raise ValueError("No images provided to convert to MP4.") + + width, height = images[0].size + fourcc = cv2.VideoWriter_fourcc(*'mp4v') + video_writer = cv2.VideoWriter(output_path, fourcc, fps, (width, height)) + + for image in images: + # If image has alpha channel, composite onto the specified background color + if image.mode == 'RGBA': + # Create background image with specified RGB color + background = Image.new('RGB', image.size, rgb_value) + background = background.convert('RGBA') + # Composite the image onto the background + image = Image.alpha_composite(background, image) + image = image.convert('RGB') + else: + # Ensure RGB format for non-alpha images + image = image.convert('RGB') + + # Convert to OpenCV format and write + open_cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR) + video_writer.write(open_cv_image) + + video_writer.release() + + +def pil_images_to_webm_alpha(images, output_path, fps=30): + """ + Converts a list of PIL RGBA images to a VP9 .webm video with alpha channel. + + NOTE: Not all players will display alpha in WebM. + Browsers like Chrome/Firefox typically do support VP9 alpha. + """ + if not images: + raise ValueError("No images provided for WebM with alpha.") + + # Ensure output directory exists + os.makedirs(os.path.dirname(output_path), exist_ok=True) + + with tempfile.TemporaryDirectory() as tmpdir: + # Save frames as PNG (with alpha) + for idx, img in enumerate(images): + if img.mode != "RGBA": + img = img.convert("RGBA") + out_path = os.path.join(tmpdir, f"{idx:06d}.png") + img.save(out_path, "PNG") + + # Construct ffmpeg command + # -c:v libvpx-vp9 => VP9 encoder + # -pix_fmt yuva420p => alpha-enabled pixel format + # -auto-alt-ref 0 => helps preserve alpha frames (libvpx quirk) + ffmpeg_cmd = [ + "ffmpeg", "-y", + "-framerate", str(fps), + "-i", os.path.join(tmpdir, "%06d.png"), + "-c:v", "libvpx-vp9", + "-pix_fmt", "yuva420p", + "-auto-alt-ref", "0", + output_path + ] + + subprocess.run(ffmpeg_cmd, check=True) + + print(f"WebM with alpha saved to {output_path}") + + +def add_audio_to_video(video_without_audio_path, original_video_path, output_path): + """ + Check if the original video has an audio stream. If yes, add it. If not, skip. 
+    """
+    # 1) Probe the original video for audio streams
+    probe_command = [
+        'ffprobe', '-v', 'error',
+        '-select_streams', 'a:0',
+        '-show_entries', 'stream=index',
+        '-of', 'csv=p=0',
+        original_video_path
+    ]
+    result = subprocess.run(probe_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
+
+    # result.stdout is empty if no audio stream was found
+    if not result.stdout.strip():
+        print("No audio track found in original video, skipping audio addition.")
+        return
+
+    print("Audio track detected; proceeding to mux audio.")
+    # 2) If audio was found, run ffmpeg to copy it into the new video
+    command = [
+        'ffmpeg', '-y',
+        '-i', video_without_audio_path,
+        '-i', original_video_path,
+        '-c', 'copy',
+        '-map', '0:v:0',
+        '-map', '1:a:0',  # we know there's an audio track now
+        output_path
+    ]
+    subprocess.run(command, check=True)
+    print(f"Audio added successfully => {output_path}")
+
+
+### Thanks to the source: https://huggingface.co/ZhengPeng7/BiRefNet/blob/main/handler.py
+def refine_foreground_process(image, mask, r=90):
+    if mask.size != image.size:
+        mask = mask.resize(image.size)
+    image = np.array(image) / 255.0
+    mask = np.array(mask) / 255.0
+    estimated_foreground = FB_blur_fusion_foreground_estimator_2(image, mask, r=r)
+    image_masked = Image.fromarray((estimated_foreground * 255.0).astype(np.uint8))
+    return image_masked
+
+
+def FB_blur_fusion_foreground_estimator_2(image, alpha, r=90):
+    # Thanks to the source: https://github.com/Photoroom/fast-foreground-estimation
+    alpha = alpha[:, :, None]
+    F, blur_B = FB_blur_fusion_foreground_estimator(image, image, image, alpha, r)
+    return FB_blur_fusion_foreground_estimator(image, F, blur_B, alpha, r=6)[0]
+
+
+def FB_blur_fusion_foreground_estimator(image, F, B, alpha, r=90):
+    if isinstance(image, Image.Image):
+        image = np.array(image) / 255.0
+    blurred_alpha = cv2.blur(alpha, (r, r))[:, :, None]
+
+    blurred_FA = cv2.blur(F * alpha, (r, r))
+    blurred_F = blurred_FA / (blurred_alpha + 1e-5)
+
+    blurred_B1A = cv2.blur(B * (1 - alpha), (r, r))
+    blurred_B = blurred_B1A / ((1 - blurred_alpha) + 1e-5)
+    F = blurred_F + alpha * \
+        (image - alpha * blurred_F - (1 - alpha) * blurred_B)
+    F = np.clip(F, 0, 1)
+    return F, blurred_B
+
+
+def postprocess_image(result: torch.Tensor, im_size: list) -> np.ndarray:
+    result = torch.squeeze(F.interpolate(result, size=im_size, mode='bilinear'), 0)
+    ma = torch.max(result)
+    mi = torch.min(result)
+    result = (result - mi) / (ma - mi)
+    im_array = (result * 255).permute(1, 2, 0).cpu().data.numpy().astype(np.uint8)
+    im_array = np.squeeze(im_array)
+    return im_array
+
+
+# This later definition overrides the rgb_loader_refiner defined earlier in this file;
+# it additionally applies the EXIF orientation before resizing.
+def rgb_loader_refiner(original_image):
+    # Apply the EXIF orientation first so the mask and the returned image agree
+    original_image = ImageOps.exif_transpose(original_image)
+
+    # NOTE: PIL's Image.size is (width, height); the swapped names are kept because the
+    # caller feeds them back to postprocess_image in the matching order.
+    h, w = original_image.size
+
+    # Convert to RGB if necessary
+    if original_image.mode != 'RGB':
+        original_image = original_image.convert('RGB')
+
+    # Resize the model input to the fixed 1024x1024 resolution
+    image = original_image.resize((1024, 1024), resample=Image.LANCZOS)
+
+    return image, h, w, original_image
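+
+
+# --------------------------------------------------------------------------------------
+# Minimal usage sketch (illustrative only). The checkpoint name "BEN_Base.pth", the input
+# files "input.png" / "input.mp4", and the output names below are placeholders, not files
+# shipped with this repository; adjust them to your own setup.
+# --------------------------------------------------------------------------------------
+if __name__ == "__main__":
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+    model = BEN_Base()
+    model.loadcheckpoints("BEN_Base.pth")  # hypothetical checkpoint path
+    model.to(device).eval()
+
+    # Single-image foreground extraction: returns an RGBA PIL image when
+    # refine_foreground=True, or an RGB image on a white background otherwise.
+    img = Image.open("input.png")  # hypothetical input image
+    cutout = model.inference(img, refine_foreground=True)
+    cutout.save("cutout.png")
+
+    # Video matting: writes foreground.mp4 (frames composited on rgb_value) into output_path;
+    # pass webm=True to export a VP9 WebM with an alpha channel instead.
+    # model.segment_video("input.mp4", output_path="./", webm=False, rgb_value=(0, 255, 0))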