# Copyright 2025 The Wan Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
import os
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.nn.functional as F
from diffusers.configuration_utils import ConfigMixin
from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
from diffusers.models.attention import FeedForward, _chunked_feed_forward
from diffusers.models.attention_processor import Attention
from diffusers.models.embeddings import PixArtAlphaTextProjection, TimestepEmbedding, Timesteps, get_1d_rotary_pos_embed
from diffusers.models.modeling_outputs import Transformer2DModelOutput
from diffusers.models.modeling_utils import ModelMixin
from diffusers.models.normalization import FP32LayerNorm
from diffusers.utils import USE_PEFT_BACKEND, logging, scale_lora_layers, unscale_lora_layers

from simpletuner.helpers.training.tread import TREADRouter

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

WAN_FEED_FORWARD_CHUNK_SIZE = int(os.getenv("WAN_FEED_FORWARD_CHUNK_SIZE", "0") or 0)
WAN_FEED_FORWARD_CHUNK_DIM = int(os.getenv("WAN_FEED_FORWARD_CHUNK_DIM", "0") or 0)


def _apply_rotary_emb_anyshape(x, rotary_emb, use_real=False):
    """
    Apply rotary embeddings that may be batched.
    Adapted for Wan's specific rotary embedding format.

    Wan's rotary embeddings encode 3D positions (T,H,W) with separate
    frequency components for each dimension. This function handles both
    the original single-batch format and the routed multi-batch format.
    """
    if rotary_emb.ndim == 4:  # (B, 1, S, D) - batched case for routed tokens
        rotary_emb = rotary_emb.squeeze(1)  # (B, S, D)
        # Convert to complex for Wan's rotary application
        x_rotated = torch.view_as_complex(
            x.to(torch.float32 if torch.backends.mps.is_available() else torch.float64).unflatten(3, (-1, 2))
        )
        # Expand rotary_emb for heads dimension
        rotary_emb_exp = rotary_emb.unsqueeze(1).expand(-1, x.size(1), -1, -1)
        x_out = torch.view_as_real(x_rotated * rotary_emb_exp).flatten(3, 4)
        return x_out.type_as(x)
    else:  # Original case - (1, 1, S, D)
        # Use original Wan rotary embedding application
        x_rotated = torch.view_as_complex(
            x.to(torch.float32 if torch.backends.mps.is_available() else torch.float64).unflatten(3, (-1, 2))
        )
        x_out = torch.view_as_real(x_rotated * rotary_emb).flatten(3, 4)
        return x_out.type_as(x)


class WanAttnProcessor2_0:
    """Attention processor for Wan built on ``F.scaled_dot_product_attention``.

    Handles three attention flavours used by the Wan transformer:
    self-attention with rotary embeddings (``encoder_hidden_states is None``),
    cross-attention to text tokens, and — when ``attn.add_k_proj`` exists
    (I2V checkpoints) — an extra image cross-attention branch whose output is
    summed with the text branch.
    """

    def __init__(self):
        if not hasattr(F, "scaled_dot_product_attention"):
            raise ImportError("WanAttnProcessor2_0 requires PyTorch 2.0. To use it, please upgrade PyTorch to 2.0.")

    def __call__(
        self,
        attn: Attention,
        hidden_states: torch.Tensor,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        rotary_emb: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Target device/dtype for all projections: whatever the incoming
        # hidden states use (keeps mixed-precision / offloaded setups in sync).
        target_dtype = hidden_states.dtype
        target_device = hidden_states.device

        def _ensure_linear_dtype(linear_module: Optional[nn.Module]) -> None:
            # Move a projection in place only when it is out of sync; mutates
            # the module so subsequent calls are no-ops.
            if linear_module is None or not hasattr(linear_module, "weight"):
                return
            weight = linear_module.weight
            if weight.device != target_device or weight.dtype != target_dtype:
                linear_module.to(device=target_device, dtype=target_dtype)

        _ensure_linear_dtype(getattr(attn, "to_q", None))
        _ensure_linear_dtype(getattr(attn, "to_k", None))
        _ensure_linear_dtype(getattr(attn, "to_v", None))
        _ensure_linear_dtype(getattr(attn, "add_k_proj", None))
        _ensure_linear_dtype(getattr(attn, "add_v_proj", None))

        # to_out can be a ModuleList ([linear, dropout]) or a bare module.
        if isinstance(getattr(attn, "to_out", None), (list, tuple, nn.ModuleList)) and attn.to_out:
            _ensure_linear_dtype(attn.to_out[0])
        elif hasattr(attn, "to_out") and isinstance(attn.to_out, nn.Module):
            _ensure_linear_dtype(attn.to_out)

        encoder_hidden_states_img = None
        if attn.add_k_proj is not None:
            # I2V: the leading tokens of the conditioning sequence are image
            # tokens prepended by the model; the remainder are text tokens.
            # NOTE(review): 513 is a hard-coded image-token count — confirm it
            # matches the image encoder output length for every checkpoint.
            encoder_hidden_states_img = encoder_hidden_states[:, :513]
            encoder_hidden_states = encoder_hidden_states[:, 513:]
        if encoder_hidden_states is None:
            # Self-attention: keys/values come from the query sequence itself.
            encoder_hidden_states = hidden_states

        query = attn.to_q(hidden_states)
        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        query = query.to(dtype=target_dtype)
        key = key.to(dtype=target_dtype)
        value = value.to(dtype=target_dtype)

        # Optional QK normalization (e.g. RMSNorm across heads).
        if attn.norm_q is not None:
            query = attn.norm_q(query).to(dtype=target_dtype)
        if attn.norm_k is not None:
            key = attn.norm_k(key).to(dtype=target_dtype)

        # (B, S, heads * head_dim) -> (B, heads, S, head_dim) for SDPA.
        query = query.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        key = key.unflatten(2, (attn.heads, -1)).transpose(1, 2)
        value = value.unflatten(2, (attn.heads, -1)).transpose(1, 2)

        if rotary_emb is not None:
            # Use the new function that handles batched rotary embeddings
            query = _apply_rotary_emb_anyshape(query, rotary_emb)
            key = _apply_rotary_emb_anyshape(key, rotary_emb)

        # I2V task: separate attention over the image conditioning tokens.
        hidden_states_img = None
        if encoder_hidden_states_img is not None:
            key_img = attn.add_k_proj(encoder_hidden_states_img)
            key_img = attn.norm_added_k(key_img)
            value_img = attn.add_v_proj(encoder_hidden_states_img)

            key_img = key_img.to(dtype=target_dtype)
            value_img = value_img.to(dtype=target_dtype)

            key_img = key_img.unflatten(2, (attn.heads, -1)).transpose(1, 2)
            value_img = value_img.unflatten(2, (attn.heads, -1)).transpose(1, 2)

            hidden_states_img = F.scaled_dot_product_attention(
                query,
                key_img,
                value_img,
                attn_mask=None,
                dropout_p=0.0,
                is_causal=False,
            )
            hidden_states_img = hidden_states_img.transpose(1, 2).flatten(2, 3)
            hidden_states_img = hidden_states_img.type_as(query)

        hidden_states = F.scaled_dot_product_attention(
            query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
        )
        # (B, heads, S, head_dim) -> (B, S, heads * head_dim).
        hidden_states = hidden_states.transpose(1, 2).flatten(2, 3)
        hidden_states = hidden_states.type_as(query)

        # Image branch output is additive with the text branch.
        if hidden_states_img is not None:
            hidden_states = hidden_states + hidden_states_img

        # to_out = [linear projection, dropout].
        hidden_states = attn.to_out[0](hidden_states)
        hidden_states = attn.to_out[1](hidden_states)
        return hidden_states.to(dtype=target_dtype)


class WanImageEmbedding(torch.nn.Module):
    """Projects image-conditioning embeddings into the transformer width.

    Pipeline: FP32 pre-norm -> GELU feed-forward projection -> FP32 post-norm.
    """

    def __init__(self, in_features: int, out_features: int):
        super().__init__()
        # Attribute order matters for state-dict key compatibility.
        self.norm1 = FP32LayerNorm(in_features)
        self.ff = FeedForward(in_features, out_features, mult=1, activation_fn="gelu")
        self.norm2 = FP32LayerNorm(out_features)

    def forward(self, encoder_hidden_states_image: torch.Tensor) -> torch.Tensor:
        """Normalize, project, and re-normalize the image embeddings."""
        return self.norm2(self.ff(self.norm1(encoder_hidden_states_image)))


class WanTimeTextImageEmbedding(nn.Module):
    """Joint conditioning embedder: timestep, text, and optional image inputs.

    Produces the base time embedding, its 6-way AdaLN projection, projected
    text embeddings, and (for I2V checkpoints) projected image embeddings.
    """

    def __init__(
        self,
        dim: int,
        time_freq_dim: int,
        time_proj_dim: int,
        text_embed_dim: int,
        image_embed_dim: Optional[int] = None,
    ):
        super().__init__()

        self.timesteps_proj = Timesteps(num_channels=time_freq_dim, flip_sin_to_cos=True, downscale_freq_shift=0)
        self.time_embedder = TimestepEmbedding(in_channels=time_freq_dim, time_embed_dim=dim)
        self.act_fn = nn.SiLU()
        self.time_proj = nn.Linear(dim, time_proj_dim)
        self.text_embedder = PixArtAlphaTextProjection(text_embed_dim, dim, act_fn="gelu_tanh")

        # The image projection only exists for I2V-style checkpoints.
        self.image_embedder = WanImageEmbedding(image_embed_dim, dim) if image_embed_dim is not None else None

    def forward(
        self,
        timestep: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: Optional[torch.Tensor] = None,
    ):
        """Return ``(temb, timestep_proj, text_embeds, image_embeds_or_None)``."""
        sinusoidal = self.timesteps_proj(timestep)

        # Align the sinusoidal embedding with the embedder weight dtype,
        # except for int8 (quantized) weights where casting would be wrong.
        embedder_dtype = next(iter(self.time_embedder.parameters())).dtype
        if sinusoidal.dtype != embedder_dtype and embedder_dtype != torch.int8:
            sinusoidal = sinusoidal.to(embedder_dtype)

        temb = self.time_embedder(sinusoidal).type_as(encoder_hidden_states)
        timestep_proj = self.time_proj(self.act_fn(temb))

        text_embeds = self.text_embedder(encoder_hidden_states)
        image_embeds = encoder_hidden_states_image
        if image_embeds is not None:
            image_embeds = self.image_embedder(image_embeds)

        return temb, timestep_proj, text_embeds, image_embeds


class WanRotaryPosEmbed(nn.Module):
    """Precomputed complex rotary frequencies for Wan's 3D (T, H, W) positions."""

    def __init__(
        self,
        attention_head_dim: int,
        patch_size: Tuple[int, int, int],
        max_seq_len: int,
        theta: float = 10000.0,
    ):
        super().__init__()

        self.attention_head_dim = attention_head_dim
        self.patch_size = patch_size
        self.max_seq_len = max_seq_len

        # Head channels are split across three axes: H and W each get
        # 2 * (dim // 6) channels, the temporal axis takes the remainder.
        h_dim = w_dim = 2 * (attention_head_dim // 6)
        t_dim = attention_head_dim - h_dim - w_dim

        # MPS cannot hold float64, so fall back to float32 there.
        freqs_dtype = torch.float32 if torch.backends.mps.is_available() else torch.float64
        self.freqs = torch.cat(
            [
                get_1d_rotary_pos_embed(
                    axis_dim,
                    max_seq_len,
                    theta,
                    use_real=False,
                    repeat_interleave_real=False,
                    freqs_dtype=freqs_dtype,
                )
                for axis_dim in (t_dim, h_dim, w_dim)
            ],
            dim=1,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        """Return frequencies of shape ``(1, 1, T*H*W tokens, head_dim // 2)``."""
        _, _, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.patch_size
        grid_t, grid_h, grid_w = num_frames // p_t, height // p_h, width // p_w

        # Lazily migrate the cached table to the active device.
        self.freqs = self.freqs.to(hidden_states.device)

        head_dim = self.attention_head_dim
        # The complex table holds head_dim // 2 columns total; split per axis.
        freqs_t, freqs_h, freqs_w = self.freqs.split_with_sizes(
            [head_dim // 2 - 2 * (head_dim // 6), head_dim // 6, head_dim // 6],
            dim=1,
        )

        # Broadcast each axis over the full (T, H, W) token grid.
        part_t = freqs_t[:grid_t].view(grid_t, 1, 1, -1).expand(grid_t, grid_h, grid_w, -1)
        part_h = freqs_h[:grid_h].view(1, grid_h, 1, -1).expand(grid_t, grid_h, grid_w, -1)
        part_w = freqs_w[:grid_w].view(1, 1, grid_w, -1).expand(grid_t, grid_h, grid_w, -1)
        return torch.cat([part_t, part_h, part_w], dim=-1).reshape(1, 1, grid_t * grid_h * grid_w, -1)


class WanTransformerBlock(nn.Module):
    """One Wan transformer layer.

    Per forward pass:
      1. AdaLN-modulated self-attention over video tokens (with rotary embeddings).
      2. Cross-attention from video tokens to the conditioning sequence.
      3. AdaLN-modulated, gated feed-forward (optionally chunked to bound
         peak activation memory).

    The six AdaLN parameters (shift/scale/gate for self-attn, shift/scale/gate
    for the FFN) come from ``scale_shift_table`` plus a per-timestep projection
    supplied by the parent model.
    """

    def __init__(
        self,
        dim: int,
        ffn_dim: int,
        num_heads: int,
        qk_norm: str = "rms_norm_across_heads",
        cross_attn_norm: bool = False,
        eps: float = 1e-6,
        added_kv_proj_dim: Optional[int] = None,
    ):
        super().__init__()

        # 1. Self-attention (norm is affine-free; modulation comes from AdaLN)
        self.norm1 = FP32LayerNorm(dim, eps, elementwise_affine=False)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_heads,
            kv_heads=num_heads,
            dim_head=dim // num_heads,
            qk_norm=qk_norm,
            eps=eps,
            bias=True,
            cross_attention_dim=None,
            out_bias=True,
            processor=WanAttnProcessor2_0(),
        )

        # 2. Cross-attention; added_kv_proj_dim enables the extra image K/V
        # projections used by I2V checkpoints.
        self.attn2 = Attention(
            query_dim=dim,
            heads=num_heads,
            kv_heads=num_heads,
            dim_head=dim // num_heads,
            qk_norm=qk_norm,
            eps=eps,
            bias=True,
            cross_attention_dim=None,
            out_bias=True,
            added_kv_proj_dim=added_kv_proj_dim,
            added_proj_bias=True,
            processor=WanAttnProcessor2_0(),
        )
        self.norm2 = FP32LayerNorm(dim, eps, elementwise_affine=True) if cross_attn_norm else nn.Identity()

        # 3. Feed-forward
        self.ffn = FeedForward(dim, inner_dim=ffn_dim, activation_fn="gelu-approximate")
        self.norm3 = FP32LayerNorm(dim, eps, elementwise_affine=False)

        # Learned AdaLN offsets, added to the timestep projection each forward.
        self.scale_shift_table = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)
        # Cached device/dtype of the block weights, refreshed by _ensure_module_dtype.
        self._parameter_dtype = self.attn1.to_q.weight.dtype
        self._parameter_device = self.attn1.to_q.weight.device
        # Feed-forward chunking state (see set_chunk_feed_forward).
        self._chunk_enabled = False
        self._chunk_auto = False
        self._chunk_size: Optional[int] = None
        self._chunk_dim: Optional[int] = None
        if WAN_FEED_FORWARD_CHUNK_SIZE > 0:
            self.set_chunk_feed_forward(WAN_FEED_FORWARD_CHUNK_SIZE, WAN_FEED_FORWARD_CHUNK_DIM)

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: Optional[int] = 0) -> None:
        """Enable feed-forward chunking.

        Args:
            chunk_size: Chunk length along ``dim``; ``None`` selects an
                automatic heuristic (see ``_auto_chunk_feed_forward``).
            dim: Dimension to chunk along (0 = batch, 1 = sequence).
        """
        self._chunk_enabled = True
        if chunk_size is None:
            self._chunk_auto = True
            self._chunk_size = None
            self._chunk_dim = dim
        else:
            normalized_size = max(1, int(chunk_size))
            normalized_dim = int(dim) if dim is not None else 0
            self._chunk_auto = False
            self._chunk_size = normalized_size
            self._chunk_dim = normalized_dim

    def disable_chunk_feed_forward(self) -> None:
        """Reset all chunking state; the FFN runs unchunked afterwards."""
        self._chunk_enabled = False
        self._chunk_auto = False
        self._chunk_size = None
        self._chunk_dim = None

    def _ensure_module_dtype(self, device: torch.device, dtype: torch.dtype) -> None:
        """Lazily realign the block's weights with the activations.

        Attention/FFN weights follow both device and dtype; the FP32 norms are
        moved by device only so they keep full precision. ``scale_shift_table``
        is handled separately because it is a direct parameter of this module.
        """
        attn_weight = self.attn1.to_q.weight
        attn_synced = attn_weight.device == device and attn_weight.dtype == dtype
        norm_synced = True
        for norm_module in (self.norm1, self.norm2, self.norm3):
            weight = getattr(norm_module, "weight", None)
            if weight is not None and weight.device != device:
                norm_synced = False
                break
        modules_synced = attn_synced and norm_synced
        scale_shift_synced = self.scale_shift_table.device == device and self.scale_shift_table.dtype == dtype

        if modules_synced and scale_shift_synced:
            return

        if not modules_synced:
            for module in (self.attn1, self.attn2, self.ffn):
                module.to(device=device, dtype=dtype)
            for norm_module in (self.norm1, self.norm2, self.norm3):
                if hasattr(norm_module, "to"):
                    # Norm layers intentionally stay in FP32; only realign devices.
                    norm_module.to(device=device)

        if not scale_shift_synced:
            self.scale_shift_table.data = self.scale_shift_table.data.to(device=device, dtype=dtype)

        # Refresh the cached placement after any migration.
        self._parameter_dtype = self.attn1.to_q.weight.dtype
        self._parameter_device = self.attn1.to_q.weight.device

    def forward(
        self,
        hidden_states: torch.Tensor,
        encoder_hidden_states: torch.Tensor,
        temb: torch.Tensor,
        rotary_emb: torch.Tensor,
    ) -> torch.Tensor:
        """Run one layer.

        Args:
            hidden_states: Video tokens ``(B, S_video, dim)``.
            encoder_hidden_states: Conditioning tokens (text, optionally with
                prepended image tokens) — never modified, only attended to.
            temb: Timestep AdaLN projection ``(B, 6, dim)``.
            rotary_emb: Complex rotary frequencies for the self-attention.

        Returns:
            Updated video tokens, same shape as ``hidden_states``.
        """
        self._ensure_module_dtype(hidden_states.device, hidden_states.dtype)

        temb = temb.to(device=self.scale_shift_table.device, dtype=self.scale_shift_table.dtype, non_blocking=True)

        # Split the combined table+timestep tensor into the six AdaLN params.
        shift_msa, scale_msa, gate_msa, c_shift_msa, c_scale_msa, c_gate_msa = (self.scale_shift_table + temb).chunk(
            6, dim=1
        )

        # 1. Self-attention (AdaLN-modulated, gated residual)
        norm_hidden_states = self.norm1(hidden_states)
        norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
        attn_output = self.attn1(hidden_states=norm_hidden_states, rotary_emb=rotary_emb)
        hidden_states = hidden_states + attn_output * gate_msa

        # 2. Cross-attention (plain residual, no gate)
        norm_hidden_states = self.norm2(hidden_states)
        attn_output = self.attn2(
            hidden_states=norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
        )
        hidden_states = hidden_states + attn_output

        # 3. Feed-forward (AdaLN-modulated, gated residual)
        norm_hidden_states = self.norm3(hidden_states)
        norm_hidden_states = norm_hidden_states * (1 + c_scale_msa) + c_shift_msa
        if self._chunk_enabled:
            ff_output = self._run_chunked_feed_forward(norm_hidden_states)
        else:
            ff_output = self.ffn(norm_hidden_states)
        hidden_states = hidden_states + ff_output * c_gate_msa

        return hidden_states

    def _run_chunked_feed_forward(self, norm_hidden_states: torch.Tensor) -> torch.Tensor:
        """Dispatch to auto-heuristic, explicit, or unchunked FFN execution."""
        if self._chunk_auto:
            return self._auto_chunk_feed_forward(norm_hidden_states)
        if self._chunk_size is None:
            return self.ffn(norm_hidden_states)
        chunk_dim = self._chunk_dim if self._chunk_dim is not None else 0
        return self._chunk_feed_forward_with_params(norm_hidden_states, chunk_dim, self._chunk_size)

    def _auto_chunk_feed_forward(self, norm_hidden_states: torch.Tensor) -> torch.Tensor:
        """Pick chunking parameters from the input shape.

        Prefers splitting the batch (pairs of samples); for single-sample
        inputs it splits the sequence into roughly four chunks instead.
        """
        batch = norm_hidden_states.size(0)
        seq_len = norm_hidden_states.size(1) if norm_hidden_states.ndim > 1 else None

        if batch > 1:
            chunk_dim = 0
            chunk_size = 2 if batch >= 2 else 1
            return self._chunk_feed_forward_with_params(norm_hidden_states, chunk_dim, chunk_size)

        if seq_len is not None and seq_len > 1:
            chunk_dim = 1
            desired_chunks = 4
            chunk_size = max(1, seq_len // desired_chunks)
            return self._chunk_feed_forward_with_params(norm_hidden_states, chunk_dim, chunk_size)

        return self.ffn(norm_hidden_states)

    def _chunk_feed_forward_with_params(
        self, norm_hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int
    ) -> torch.Tensor:
        """Run the FFN in chunks, falling back to a single pass whenever the
        requested parameters cannot produce a valid, even split."""
        if chunk_dim < 0 or chunk_dim >= norm_hidden_states.ndim:
            return self.ffn(norm_hidden_states)

        dim_extent = norm_hidden_states.size(chunk_dim)
        if dim_extent <= 1:
            return self.ffn(norm_hidden_states)

        if chunk_size <= 0:
            return self.ffn(norm_hidden_states)

        # A chunk covering the whole extent is pointless; halve it instead.
        if chunk_size >= dim_extent:
            chunk_size = max(1, dim_extent // 2)
            if chunk_size == 0:
                return self.ffn(norm_hidden_states)

        # _chunked_feed_forward requires an even division; shrink to the GCD.
        if dim_extent % chunk_size != 0:
            chunk_size = math.gcd(dim_extent, chunk_size)
            if chunk_size <= 1:
                return self.ffn(norm_hidden_states)

        return _chunked_feed_forward(self.ffn, norm_hidden_states, chunk_dim, chunk_size)


class WanTransformer3DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
    r"""
    A Transformer model for video-like data used in the Wan model.

    Args:
        patch_size (`Tuple[int]`, defaults to `(1, 2, 2)`):
            3D patch dimensions for video embedding (t_patch, h_patch, w_patch).
        num_attention_heads (`int`, defaults to `40`):
            The number of heads to use for multi-head attention.
        attention_head_dim (`int`, defaults to `128`):
            The number of channels in each head.
        in_channels (`int`, defaults to `16`):
            The number of channels in the input.
        out_channels (`int`, defaults to `16`):
            The number of channels in the output.
        text_dim (`int`, defaults to `4096`):
            Input dimension for text embeddings.
        freq_dim (`int`, defaults to `256`):
            Dimension for sinusoidal time embeddings.
        ffn_dim (`int`, defaults to `13824`):
            Intermediate dimension in feed-forward network.
        num_layers (`int`, defaults to `40`):
            The number of layers of transformer blocks to use.
        cross_attn_norm (`bool`, defaults to `True`):
            Enable cross-attention normalization.
        qk_norm (`str`, *optional*, defaults to `"rms_norm_across_heads"`):
            Query/key normalization mode passed to the attention layers.
        eps (`float`, defaults to `1e-6`):
            Epsilon value for normalization layers.
        image_dim (`int`, *optional*, defaults to `None`):
            Input dimension of image-conditioning embeddings (I2V checkpoints). `None` disables the image embedder.
        added_kv_proj_dim (`int`, *optional*, defaults to `None`):
            The number of channels to use for the added key and value projections. If `None`, no projection is used.
        rope_max_seq_len (`int`, defaults to `1024`):
            Maximum per-axis length of the precomputed rotary position table.
        feed_forward_chunk_size (`int`, *optional*, defaults to `None`):
            When set, split feed-forward computations into smaller pieces to reduce peak memory. Can also be provided
            via the `WAN_FEED_FORWARD_CHUNK_SIZE` environment variable.
        feed_forward_chunk_dim (`int`, defaults to `0`):
            The dimension along which chunking is applied. Defaults to the batch dimension.
    """

    _supports_gradient_checkpointing = True
    # TREAD token-routing state; configured via set_router().
    _tread_router: Optional[TREADRouter] = None
    _tread_routes: Optional[List[Dict[str, Any]]] = None
    _skip_layerwise_casting_patterns = ["patch_embedding", "condition_embedder", "norm"]
    _no_split_modules = ["WanTransformerBlock"]
    # Modules kept in FP32 for numerical stability under mixed precision.
    _keep_in_fp32_modules = [
        "time_embedder",
        "scale_shift_table",
        "norm1",
        "norm2",
        "norm3",
    ]
    _keys_to_ignore_on_load_unexpected = ["norm_added_q"]

    def __init__(
        self,
        patch_size: Tuple[int] = (1, 2, 2),
        num_attention_heads: int = 40,
        attention_head_dim: int = 128,
        in_channels: int = 16,
        out_channels: int = 16,
        text_dim: int = 4096,
        freq_dim: int = 256,
        ffn_dim: int = 13824,
        num_layers: int = 40,
        cross_attn_norm: bool = True,
        qk_norm: Optional[str] = "rms_norm_across_heads",
        eps: float = 1e-6,
        image_dim: Optional[int] = None,
        added_kv_proj_dim: Optional[int] = None,
        rope_max_seq_len: int = 1024,
        feed_forward_chunk_size: Optional[int] = None,
        feed_forward_chunk_dim: int = 0,
    ) -> None:
        super().__init__()
        # out_channels=0/None falls back to in_channels.
        effective_out_channels = out_channels or in_channels
        self.register_to_config(
            patch_size=patch_size,
            num_attention_heads=num_attention_heads,
            attention_head_dim=attention_head_dim,
            in_channels=in_channels,
            out_channels=effective_out_channels,
            text_dim=text_dim,
            freq_dim=freq_dim,
            ffn_dim=ffn_dim,
            num_layers=num_layers,
            cross_attn_norm=cross_attn_norm,
            qk_norm=qk_norm,
            eps=eps,
            image_dim=image_dim,
            added_kv_proj_dim=added_kv_proj_dim,
            rope_max_seq_len=rope_max_seq_len,
            feed_forward_chunk_size=feed_forward_chunk_size,
            feed_forward_chunk_dim=feed_forward_chunk_dim,
        )

        inner_dim = num_attention_heads * attention_head_dim
        out_channels = effective_out_channels

        # 1. Patch & position embedding
        self.rope = WanRotaryPosEmbed(attention_head_dim, patch_size, rope_max_seq_len)
        self.patch_embedding = nn.Conv3d(in_channels, inner_dim, kernel_size=patch_size, stride=patch_size)

        # 2. Condition embeddings
        # image_embedding_dim=1280 for I2V model
        self.condition_embedder = WanTimeTextImageEmbedding(
            dim=inner_dim,
            time_freq_dim=freq_dim,
            time_proj_dim=inner_dim * 6,  # six AdaLN parameters per block
            text_embed_dim=text_dim,
            image_embed_dim=image_dim,
        )

        # 3. Transformer blocks
        self.blocks = nn.ModuleList(
            [
                WanTransformerBlock(
                    inner_dim,
                    ffn_dim,
                    num_attention_heads,
                    qk_norm,
                    cross_attn_norm,
                    eps,
                    added_kv_proj_dim,
                )
                for _ in range(num_layers)
            ]
        )

        # Explicit config takes precedence; env vars are the fallback.
        configured_chunk_size = feed_forward_chunk_size
        configured_chunk_dim = feed_forward_chunk_dim
        if configured_chunk_size is None and WAN_FEED_FORWARD_CHUNK_SIZE > 0:
            configured_chunk_size = WAN_FEED_FORWARD_CHUNK_SIZE
            configured_chunk_dim = WAN_FEED_FORWARD_CHUNK_DIM
        self._feed_forward_chunk_size = configured_chunk_size
        self._feed_forward_chunk_dim = configured_chunk_dim
        if configured_chunk_size is not None:
            self.set_chunk_feed_forward(configured_chunk_size, configured_chunk_dim)

        # 4. Output norm & projection (proj_out maps to out_channels per patch voxel)
        self.norm_out = FP32LayerNorm(inner_dim, eps, elementwise_affine=False)
        self.proj_out = nn.Linear(inner_dim, out_channels * math.prod(patch_size))
        self.scale_shift_table = nn.Parameter(torch.randn(1, 2, inner_dim) / inner_dim**0.5)

        self.gradient_checkpointing = False
        self.force_v2_1_time_embedding: bool = False

    def set_time_embedding_v2_1(self, force_2_1_time_embedding: bool) -> None:
        """
        Force the Wan transformer to use 2.1-style time embeddings even when running Wan 2.2 checkpoints.

        Args:
            force_2_1_time_embedding: Whether to override the default time embedding behaviour.
        """
        self.force_v2_1_time_embedding = bool(force_2_1_time_embedding)
        if self.force_v2_1_time_embedding:
            logger.info("WanTransformer3DModel: Forcing Wan 2.1 style time embedding.")

    def set_router(self, router: TREADRouter, routes: List[Dict[str, Any]]):
        """Set the TREAD router and routing configuration."""
        self._tread_router = router
        self._tread_routes = routes

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0) -> None:
        """
        Configure feed-forward chunking for all transformer blocks.
        """
        self._feed_forward_chunk_size = chunk_size
        self._feed_forward_chunk_dim = dim
        for block in self.blocks:
            block.set_chunk_feed_forward(chunk_size, dim)

    @staticmethod
    def _route_rope(rope, info, keep_len: int, batch: int):
        """
        Apply the router's (ids_shuffle → slice) transform to rotary embeddings.
        Adapted for Wan's rotary embedding format which encodes 3D position (T,H,W).

        Note: Wan's rotary embeddings encode temporal, height, and width positions
        separately in different dimensions of the embedding. When routing, we maintain
        the correspondence between tokens and their 3D positional encoding.

        Args:
            rope: Frequency tensor of shape (1, 1, S, D).
            info: Router mask info; ``info.ids_shuffle`` is the per-sample
                permutation (assumed shape (B, S) — produced by TREADRouter.get_mask).
            keep_len: Number of tokens kept after routing.
            batch: Batch size of the routed hidden states.

        Returns:
            Routed frequencies of shape (B, 1, keep_len, D).
        """

        # rope is a tensor of shape (1, 1, S, D) for Wan
        # where S = t_tokens * h_tokens * w_tokens
        def _route_one(r: torch.Tensor) -> torch.Tensor:
            # Remove the batch dimensions temporarily
            r_squeeze = r.squeeze(0).squeeze(0)  # (S, D)
            # Expand to batch size
            rB = r_squeeze.unsqueeze(0).expand(batch, -1, -1)  # (B, S, D)
            # Apply shuffle - this maintains the token-position correspondence
            shuf = torch.take_along_dim(rB, info.ids_shuffle.unsqueeze(-1).expand_as(rB), dim=1)
            # Keep only the selected tokens and add back the extra dimension
            return shuf[:, :keep_len, :].unsqueeze(1)  # (B, 1, keep_len, D)

        return _route_one(rope)

    def forward(
        self,
        hidden_states: torch.Tensor,
        timestep: torch.LongTensor,
        encoder_hidden_states: torch.Tensor,
        encoder_hidden_states_image: Optional[torch.Tensor] = None,
        skip_layers: Optional[List[int]] = None,
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        force_keep_mask: Optional[torch.Tensor] = None,
    ) -> Union[torch.Tensor, Dict[str, torch.Tensor]]:
        """Denoise one step.

        Args:
            hidden_states: Latent video, shape (B, C, F, H, W).
            timestep: Diffusion timestep(s) per batch entry.
            encoder_hidden_states: Text-encoder embeddings (B, S_text, text_dim).
            encoder_hidden_states_image: Optional image embeddings (I2V).
            skip_layers: Block indices to skip entirely (e.g. for SLG).
            return_dict: Return a Transformer2DModelOutput instead of a tuple.
            attention_kwargs: Optional dict; only `scale` (LoRA scale) is consumed.
            force_keep_mask: Optional mask of tokens TREAD must never drop.
        """
        if attention_kwargs is not None:
            # Copy so the caller's dict is not mutated by the pop below.
            attention_kwargs = attention_kwargs.copy()
            lora_scale = attention_kwargs.pop("scale", 1.0)
        else:
            lora_scale = 1.0

        if USE_PEFT_BACKEND:
            scale_lora_layers(self, lora_scale)
        else:
            # NOTE(review): `scale` was already popped from the copy above, so this
            # condition can never be True and the warning is dead — confirm whether
            # it should test `lora_scale != 1.0` instead.
            if attention_kwargs is not None and attention_kwargs.get("scale", None) is not None:
                logger.warning("Passing `scale` via `attention_kwargs` when not using the PEFT backend is ineffective.")

        batch_size, num_channels, num_frames, height, width = hidden_states.shape
        p_t, p_h, p_w = self.config.patch_size
        # Token-grid extents after patchification.
        post_patch_num_frames = num_frames // p_t
        post_patch_height = height // p_h
        post_patch_width = width // p_w

        # Rotary table is built from the pre-patch latent shape.
        rotary_emb = self.rope(hidden_states)

        # (B, C, F, H, W) -> (B, S_video, inner_dim) token sequence.
        hidden_states = self.patch_embedding(hidden_states)
        hidden_states = hidden_states.flatten(2).transpose(1, 2)

        if self.force_v2_1_time_embedding and timestep.dim() > 1:
            # Wan 2.1 uses a single timestep per batch entry. When forcing 2.1 behaviour with Wan 2.2
            # checkpoints we fall back to the first timestep value which matches the reference implementation.
            timestep = timestep[..., 0].contiguous()

        temb, timestep_proj, encoder_hidden_states, encoder_hidden_states_image = self.condition_embedder(
            timestep, encoder_hidden_states, encoder_hidden_states_image
        )
        # (B, 6 * dim) -> (B, 6, dim): the per-block AdaLN parameters.
        timestep_proj = timestep_proj.unflatten(1, (6, -1))

        # I2V: image tokens are prepended to the text tokens (the attention
        # processor later splits them off again).
        if encoder_hidden_states_image is not None:
            encoder_hidden_states = torch.concat([encoder_hidden_states_image, encoder_hidden_states], dim=1)

        # TREAD initialization
        # Note: In Wan, video tokens and text tokens are kept separate
        # - hidden_states contains video tokens (B, S_video, D)
        # - encoder_hidden_states contains text tokens (B, S_text, D)
        # We only route video tokens, text tokens remain unchanged
        routes = self._tread_routes or []
        router = self._tread_router
        # Routing only applies during training with gradients enabled.
        use_routing = self.training and len(routes) > 0 and torch.is_grad_enabled()
        route_ptr = 0
        routing_now = False
        tread_mask_info = None
        saved_tokens = None
        current_rope = rotary_emb

        # Handle negative route indices
        if routes:
            total_layers = len(self.blocks)

            def _to_pos(idx):
                # Negative indices count back from the last block, Python-style.
                return idx if idx >= 0 else total_layers + idx

            routes = [
                {
                    **r,
                    "start_layer_idx": _to_pos(r["start_layer_idx"]),
                    "end_layer_idx": _to_pos(r["end_layer_idx"]),
                }
                for r in routes
            ]

        # Transformer blocks with TREAD routing
        for i, block in enumerate(self.blocks):
            # TREAD: START a route?
            if use_routing and route_ptr < len(routes) and i == routes[route_ptr]["start_layer_idx"]:
                mask_ratio = routes[route_ptr]["selection_ratio"]

                # Apply routing to video tokens only
                # Note: encoder_hidden_states (text) is never routed, only passed to cross-attention
                tread_mask_info = router.get_mask(
                    hidden_states,  # (B, S_video, D) where S_video = T*H*W tokens
                    mask_ratio=mask_ratio,
                    force_keep=force_keep_mask,
                )
                # Keep the full sequence so dropped tokens can be restored at route end.
                saved_tokens = hidden_states.clone()
                hidden_states = router.start_route(hidden_states, tread_mask_info)
                routing_now = True

                # Route the rotary embeddings to match the selected video tokens
                # This preserves the 3D positional information for kept tokens
                current_rope = self._route_rope(
                    rotary_emb,
                    tread_mask_info,
                    keep_len=hidden_states.size(1),
                    batch=hidden_states.size(0),
                )

            # Skip layers if specified
            # NOTE(review): this `continue` also bypasses the end-of-route check
            # below — if a skipped layer coincides with a route's end_layer_idx,
            # dropped tokens are never restored. Confirm skip_layers and TREAD
            # routing are never active together.
            if skip_layers is not None and i in skip_layers:
                continue

            # Apply transformer block
            # Each block does:
            # 1. Self-attention on video tokens (with rotary embeddings)
            # 2. Cross-attention from video to text tokens
            # 3. Feed-forward on video tokens
            # Only video tokens are routed; text tokens always remain full sequence
            if torch.is_grad_enabled() and self.gradient_checkpointing:
                hidden_states = self._gradient_checkpointing_func(
                    block,
                    hidden_states,  # video tokens (possibly routed)
                    encoder_hidden_states,  # text tokens (always full sequence)
                    timestep_proj,
                    current_rope,  # rotary embeddings (possibly routed)
                )
            else:
                hidden_states = block(hidden_states, encoder_hidden_states, timestep_proj, current_rope)

            # TREAD: END the current route?
            if routing_now and i == routes[route_ptr]["end_layer_idx"]:
                # Scatter the processed kept tokens back into the full sequence.
                hidden_states = router.end_route(
                    hidden_states,
                    tread_mask_info,
                    original_x=saved_tokens,
                )
                routing_now = False
                route_ptr += 1
                current_rope = rotary_emb

        # Output processing remains the same
        # temb: (B, dim) -> (B, 1, dim) to combine with the (1, 2, dim) table.
        shift, scale = (self.scale_shift_table + temb.unsqueeze(1)).chunk(2, dim=1)
        shift = shift.to(hidden_states.device)
        scale = scale.to(hidden_states.device)

        # Final norm runs in FP32 then casts back to the activation dtype.
        hidden_states = (self.norm_out(hidden_states.float()) * (1 + scale) + shift).type_as(hidden_states)
        hidden_states = self.proj_out(hidden_states)

        # Un-patchify: (B, S, C * pt * ph * pw) -> (B, T, H, W, pt, ph, pw, C)
        hidden_states = hidden_states.reshape(
            batch_size,
            post_patch_num_frames,
            post_patch_height,
            post_patch_width,
            p_t,
            p_h,
            p_w,
            -1,
        )
        # -> (B, C, T, pt, H, ph, W, pw), then merge each (grid, patch) pair
        # back into full frames/rows/cols: (B, C, F, H, W).
        hidden_states = hidden_states.permute(0, 7, 1, 4, 2, 5, 3, 6)
        output = hidden_states.flatten(6, 7).flatten(4, 5).flatten(2, 3)

        if USE_PEFT_BACKEND:
            unscale_lora_layers(self, lora_scale)

        if not return_dict:
            return (output,)

        return Transformer2DModelOutput(sample=output)
