from abc import abstractmethod
import math
import numpy as np
import os
import torch as th
import torch.nn as nn
import torch.nn.functional as F

from .fp16_util import convert_module_to_f16, convert_module_to_f32
from .nn import (
    checkpoint,
    linear,
    zero_module,
    normalization,
    timestep_embedding,
)

class TimestepBlock(nn.Module):
    """
    Interface for modules whose forward() takes a timestep embedding and a
    biological time conditioning vector in addition to the feature tensor.

    Subclasses (e.g. ResBlock) implement forward(x, emb, time_cond);
    TimestepEmbedSequential uses an isinstance check against this class to
    decide which children receive the extra arguments.
    """

    @abstractmethod
    def forward(self, x, emb, time_cond):
        """
        Apply the module to `x` given `emb` timestep embeddings and
        `time_cond` biological time conditioning.
        """

class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
    """
    A sequential container that forwards the timestep embedding `emb` and
    the biological time conditioning `time_cond` to every child that is a
    TimestepBlock; all other children receive only the feature tensor.
    """

    def forward(self, x, emb, time_cond):
        for module in self:
            takes_cond = isinstance(module, TimestepBlock)
            x = module(x, emb, time_cond) if takes_cond else module(x)
        return x

class Upsample(nn.Module):
    """
    Nearest-neighbour 2x upsampling with an optional 3x3 convolution.

    For dims == 3 only the last two (spatial) dimensions are doubled; the
    depth dimension is preserved.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        if use_conv:
            # NOTE(review): nn.Conv2d expects 4-D input, but the dims == 3
            # path below yields 5-D tensors — confirm dims == 3 is only used
            # with use_conv=False.
            self.conv = nn.Conv2d(self.channels, self.out_channels, 3, padding=padding)

    def forward(self, x):
        assert x.shape[1] == self.channels
        if self.dims == 3:
            depth, height, width = x.shape[2], x.shape[3], x.shape[4]
            x = F.interpolate(x, (depth, height * 2, width * 2), mode="nearest")
        else:
            x = F.interpolate(x, scale_factor=2, mode="nearest")
        return self.conv(x) if self.use_conv else x

class Downsample(nn.Module):
    """
    2x downsampling via a strided 3x3 convolution or 2x2 average pooling.

    For dims == 3 only the last two (spatial) dimensions are halved; the
    depth dimension is preserved.
    """

    def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        # Keep depth for 3-D inputs, halve everything spatial otherwise.
        stride = (1, 2, 2) if dims == 3 else 2
        if use_conv:
            self.op = nn.Conv2d(
                self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Pooling cannot change the channel count.
            assert self.channels == self.out_channels
            self.op = nn.AvgPool2d(kernel_size=stride, stride=stride)

    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)

class ResBlock(TimestepBlock):
    """
    A residual block that can optionally change the number of channels.

    The timestep embedding `emb` (and, optionally, a biological time vector
    `time_cond`) is projected per-block and injected either additively or as
    a scale/shift on the output normalization.

    :param channels: number of input channels.
    :param emb_channels: width of the timestep embedding.
    :param dropout: dropout rate applied before the final convolution.
    :param out_channels: number of output channels (defaults to `channels`).
    :param use_conv: use a 3x3 conv (instead of 1x1) for the skip projection
        when the channel count changes.
    :param use_scale_shift_norm: inject the embedding as
        norm(h) * (1 + scale) + shift instead of adding it to `h`.
    :param dims: forwarded to the Upsample/Downsample helpers; the convs in
        this block itself are always 2-D.
    :param use_checkpoint: use gradient checkpointing on this block.
    :param up: if True, the block also upsamples (both branches).
    :param down: if True, the block also downsamples.
    :param time_cond_channels: width of the biological time conditioning
        vector, or None to disable that pathway.
    """

    def __init__(
        self,
        channels,
        emb_channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
        time_cond_channels=None,
    ):
        super().__init__()
        self.channels = channels
        self.emb_channels = emb_channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm

        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            nn.Conv2d(channels, self.out_channels, 3, padding=1),
        )

        self.updown = up or down

        if up:
            # Parameter-free resampling applied to both the residual branch
            # (h_upd) and the skip branch (x_upd) so shapes stay aligned.
            self.h_upd = Upsample(channels, False, dims)
            self.x_upd = Upsample(channels, False, dims)
        elif down:
            self.h_upd = Downsample(channels, False, dims)
            self.x_upd = Downsample(channels, False, dims)
        else:
            self.h_upd = self.x_upd = nn.Identity()

        # Projects the timestep embedding; twice the width when using
        # scale/shift norm (one half for scale, one half for shift).
        self.emb_layers = nn.Sequential(
            nn.SiLU(),
            linear(
                emb_channels,
                2 * self.out_channels if use_scale_shift_norm else self.out_channels,
            ),
        )

        # Biological time conditioning
        if time_cond_channels is not None:
            self.time_cond_layers = nn.Sequential(
                nn.SiLU(),
                linear(
                    time_cond_channels,
                    2 * self.out_channels if self.use_scale_shift_norm else self.out_channels,
                ),
            )
        else:
            self.time_cond_layers = None

        # Final conv is zero-initialized so the block starts out as an
        # identity mapping (only the skip connection contributes initially).
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            zero_module(nn.Conv2d(self.out_channels, self.out_channels, 3, padding=1)),
        )

        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = nn.Conv2d(channels, self.out_channels, 3, padding=1)
        else:
            self.skip_connection = nn.Conv2d(channels, self.out_channels, 1)

    def forward(self, x, emb, time_cond=None):
        """
        Apply the block to a Tensor, conditioned on a timestep embedding.

        :param x: an [N x C x ...] Tensor of features.
        :param emb: an [N x emb_channels] Tensor of timestep embeddings.
        :param time_cond: an [N x time_cond_channels] Tensor of biological time conditioning.
        :return: an [N x C x ...] Tensor of outputs.
        """
        return checkpoint(
            self._forward, (x, emb, time_cond), self.parameters(), self.use_checkpoint
        )

    def _forward(self, x, emb, time_cond=None):
        if self.updown:
            # Resample between norm/activation and the conv so the conv
            # operates at the new resolution.
            in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]
            h = in_rest(x)
            h = self.h_upd(h)
            x = self.x_upd(x)
            h = in_conv(h)
        else:
            h = self.in_layers(x)

        emb_out = self.emb_layers(emb).type(h.dtype)
        # Broadcast the [N, C] embedding over the spatial dims of h.
        while len(emb_out.shape) < len(h.shape):
            emb_out = emb_out[..., None]

        # Add biological time conditioning
        if time_cond is not None and self.time_cond_layers is not None:
            time_cond_out = self.time_cond_layers(time_cond).type(h.dtype)
            while len(time_cond_out.shape) < len(h.shape):
                time_cond_out = time_cond_out[..., None]
            emb_out = emb_out + time_cond_out

        if self.use_scale_shift_norm:
            # FiLM-style conditioning: the embedding supplies a scale and
            # shift applied right after the output normalization.
            out_norm, out_rest = self.out_layers[0], self.out_layers[1:]
            scale, shift = th.chunk(emb_out, 2, dim=1)
            h = out_norm(h) * (1 + scale) + shift
            h = out_rest(h)
        else:
            h = h + emb_out
            h = self.out_layers(h)
        return self.skip_connection(x) + h

class AttentionBlock(nn.Module):
    """
    An attention block that allows spatial positions to attend to each other.

    The input is flattened to [N x C x (prod spatial)], run through
    multi-head self-attention, and added back residually.

    :param channels: number of feature channels.
    :param num_heads: number of attention heads (used if num_head_channels
        is -1).
    :param num_head_channels: if not -1, derive the head count as
        channels // num_head_channels instead.
    :param use_checkpoint: use gradient checkpointing on this block.
    :param use_new_attention_order: split qkv before splitting heads
        (QKVAttention) instead of after (QKVAttentionLegacy).
    """

    def __init__(
        self,
        channels,
        num_heads=1,
        num_head_channels=-1,
        use_checkpoint=False,
        use_new_attention_order=False,
    ):
        super().__init__()
        self.channels = channels
        if num_head_channels == -1:
            self.num_heads = num_heads
        else:
            assert (
                channels % num_head_channels == 0
            ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}"
            self.num_heads = channels // num_head_channels
        self.use_checkpoint = use_checkpoint
        self.norm = normalization(channels)
        self.qkv = nn.Conv1d(channels, channels * 3, 1)
        if use_new_attention_order:
            # split qkv before split heads
            self.attention = QKVAttention(self.num_heads)
        else:
            # split heads before split qkv
            self.attention = QKVAttentionLegacy(self.num_heads)

        self.proj_out = zero_module(nn.Conv1d(channels, channels, 1))

    def forward(self, x):
        # Fix: honor the stored use_checkpoint flag. The previous hard-coded
        # `True` forced gradient checkpointing on every attention block,
        # silently ignoring the constructor argument.
        return checkpoint(self._forward, (x,), self.parameters(), self.use_checkpoint)

    def _forward(self, x):
        b, c, *spatial = x.shape
        x = x.reshape(b, c, -1)
        qkv = self.qkv(self.norm(x))
        h = self.attention(qkv)
        h = self.proj_out(h)
        return (x + h).reshape(b, c, *spatial)

class QKVAttentionLegacy(nn.Module):
    """
    QKV attention matching the legacy ordering: heads are split first, then
    each head's slab is divided into q, k, v.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        flat = qkv.reshape(batch * self.n_heads, head_dim * 3, seq_len)
        q, k, v = flat.split(head_dim, dim=1)
        # Scale q and k symmetrically before the matmul; more stable with
        # f16 than dividing the product afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        logits = th.einsum("bct,bcs->bts", q * scale, k * scale)
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        out = th.einsum("bts,bcs->bct", probs, v)
        return out.reshape(batch, -1, seq_len)

class QKVAttention(nn.Module):
    """
    QKV attention in the "new" ordering: the input is split into q, k, v
    first, and heads are split afterwards inside each projection.
    """

    def __init__(self, n_heads):
        super().__init__()
        self.n_heads = n_heads

    def forward(self, qkv):
        """
        Apply QKV attention.
        :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs.
        :return: an [N x (H * C) x T] tensor after attention.
        """
        batch, width, seq_len = qkv.shape
        assert width % (3 * self.n_heads) == 0
        head_dim = width // (3 * self.n_heads)
        q, k, v = qkv.chunk(3, dim=1)
        # Scale q and k symmetrically before the matmul; more stable with
        # f16 than dividing the product afterwards.
        scale = 1 / math.sqrt(math.sqrt(head_dim))
        q_flat = (q * scale).view(batch * self.n_heads, head_dim, seq_len)
        k_flat = (k * scale).view(batch * self.n_heads, head_dim, seq_len)
        logits = th.einsum("bct,bcs->bts", q_flat, k_flat)
        probs = th.softmax(logits.float(), dim=-1).type(logits.dtype)
        v_flat = v.view(batch * self.n_heads, head_dim, seq_len)
        out = th.einsum("bts,bcs->bct", probs, v_flat)
        return out.reshape(batch, -1, seq_len)

class UNetModel(nn.Module):
    """
    The full UNet model with attention and timestep embedding for Hi-C data.
    Now includes semantic encoder for zsem conditioning.

    Conditioning signals:
      * diffusion timestep (always, via `time_embed`),
      * class label `y` when `num_classes` is set (added to the embedding),
      * semantic features `sem_cond` (projected and added to the embedding),
      * biological time `bio_time` (embedded separately and passed to every
        ResBlock as `time_cond`).
    """

    def __init__(
        self,
        image_size,
        in_channels,
        model_channels,
        out_channels,
        num_res_blocks,
        attention_resolutions,
        dropout=0,
        channel_mult=(1, 2, 4, 8),
        conv_resample=True,
        dims=2,
        num_classes=None,
        use_checkpoint=False,
        use_fp16=False,
        num_heads=-1,
        num_head_channels=-1,
        num_heads_upsample=-1,
        use_scale_shift_norm=False,
        resblock_updown=False,
        use_new_attention_order=False,
        use_spatial_transformer=False,
        transformer_depth=1,
        context_dim=None,
        time_cond_dim=128,  # Biological time conditioning dimension
        use_transition_anchors=True,
        cond_drop_prob=0.0,
        sem_cond_dim=256,   # Semantic conditioning dimension
        use_semantic_encoder=True,  # Whether to use built-in semantic encoder
        sem_drop_prob=0.1,  # Classifier-free dropout for semantic conditioning
        use_vae_encoder=False,  # Whether to use VAE encoder for semantic encoding
        vae_encoder_path='',  # Path to pre-trained VAE encoder
        freeze_vae_encoder=True,  # Whether to freeze VAE encoder weights
    ):
        super().__init__()

        # NOTE(review): SpatialTransformer is referenced below but never
        # imported or defined in this file; use_spatial_transformer=True will
        # raise NameError at construction — confirm the intended import.
        if use_spatial_transformer:
            assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'

        if context_dim is not None:
            assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
            from omegaconf.listconfig import ListConfig
            if type(context_dim) == ListConfig:
                context_dim = list(context_dim)

        if num_heads_upsample == -1:
            num_heads_upsample = num_heads

        if num_heads == -1:
            assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'

        if num_head_channels == -1:
            assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'

        self.image_size = image_size
        self.in_channels = in_channels
        self.model_channels = model_channels
        self.out_channels = out_channels
        self.num_res_blocks = num_res_blocks
        self.attention_resolutions = attention_resolutions
        self.dropout = dropout
        self.channel_mult = channel_mult
        self.conv_resample = conv_resample
        self.num_classes = num_classes
        self.use_checkpoint = use_checkpoint
        self.dtype = th.float16 if use_fp16 else th.float32
        self.num_heads = num_heads
        self.num_head_channels = num_head_channels
        self.num_heads_upsample = num_heads_upsample
        # The attribute never pre-exists here, so this always resolves to
        # False; kept for compatibility with subclasses that set it earlier.
        self.predict_codebook_ids = getattr(self, 'predict_codebook_ids', False)
        self.time_cond_dim = time_cond_dim
        self.use_transition_anchors = use_transition_anchors
        self.cond_drop_prob = cond_drop_prob

        # Semantic conditioning
        self.sem_cond_dim = sem_cond_dim
        self.use_semantic_encoder = use_semantic_encoder
        self.sem_drop_prob = sem_drop_prob
        self.use_vae_encoder = use_vae_encoder
        self.vae_encoder_path = vae_encoder_path
        self.freeze_vae_encoder = freeze_vae_encoder

        time_embed_dim = model_channels * 4
        self.time_embed = nn.Sequential(
            linear(model_channels, time_embed_dim),
            nn.SiLU(),
            linear(time_embed_dim, time_embed_dim),
        )

        # Biological time conditioning
        if time_cond_dim > 0:
            self.bio_time_embed = nn.Sequential(
                linear(128, time_cond_dim),  # 128 from sinusoidal embedding
                nn.SiLU(),
                linear(time_cond_dim, time_cond_dim),
            )
            if self.use_transition_anchors:
                # Learn two anchor vectors representing early and late distributions
                self.transition_anchor_early = nn.Parameter(th.randn(128))
                self.transition_anchor_late = nn.Parameter(th.randn(128))
        else:
            self.bio_time_embed = None

        # Semantic conditioning
        if sem_cond_dim > 0:
            # Projection layer to map semantic features to embedding space
            self.sem_cond_proj = nn.Sequential(
                linear(sem_cond_dim, time_embed_dim),
                nn.SiLU(),
                linear(time_embed_dim, time_embed_dim),
            )

            # Built-in semantic encoder (optional)
            if use_semantic_encoder:
                self.semantic_encoder = self._build_semantic_encoder()

                # Load pre-trained VAE encoder if specified
                if use_vae_encoder and vae_encoder_path and os.path.exists(vae_encoder_path):
                    self.load_vae_encoder(vae_encoder_path, freeze_vae_encoder)
            else:
                # Fix: previously left unset in this branch, which made
                # encode_semantic() raise AttributeError instead of its
                # intended ValueError.
                self.semantic_encoder = None
        else:
            self.sem_cond_proj = None
            self.semantic_encoder = None

        if self.num_classes is not None:
            self.label_emb = nn.Embedding(num_classes, time_embed_dim)

        self.input_blocks = nn.ModuleList(
            [
                TimestepEmbedSequential(
                    nn.Conv2d(in_channels, model_channels, 3, padding=1)
                )
            ]
        )
        self._feature_size = model_channels
        input_block_chans = [model_channels]
        ch = model_channels
        ds = 1
        for level, mult in enumerate(channel_mult):
            for _ in range(num_res_blocks):
                layers = [
                    ResBlock(
                        ch,
                        time_embed_dim,
                        dropout,
                        out_channels=mult * model_channels,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
                    )
                ]
                ch = mult * model_channels
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    # `legacy` is a module-level flag defined at the bottom of
                    # this file; it is only read here at construction time.
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                self.input_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch
                input_block_chans.append(ch)
            if level != len(channel_mult) - 1:
                out_ch = ch
                self.input_blocks.append(
                    TimestepEmbedSequential(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            down=True,
                            time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
                        )
                        if resblock_updown
                        else Downsample(
                            ch, conv_resample, dims=dims, out_channels=out_ch
                        )
                    )
                )
                ch = out_ch
                input_block_chans.append(ch)
                ds *= 2
                self._feature_size += ch

        if num_head_channels == -1:
            dim_head = ch // num_heads
        else:
            num_heads = ch // num_head_channels
            dim_head = num_head_channels
        if legacy:
            #num_heads = 1
            dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
        self.middle_block = TimestepEmbedSequential(
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
            ),
            AttentionBlock(
                ch,
                use_checkpoint=use_checkpoint,
                num_heads=num_heads,
                num_head_channels=dim_head,
                use_new_attention_order=use_new_attention_order,
            ) if not use_spatial_transformer else SpatialTransformer(
                ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
            ),
            ResBlock(
                ch,
                time_embed_dim,
                dropout,
                dims=dims,
                use_checkpoint=use_checkpoint,
                use_scale_shift_norm=use_scale_shift_norm,
                time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
            ),
        )
        self._feature_size += ch

        self.output_blocks = nn.ModuleList([])
        for level, mult in list(enumerate(channel_mult))[::-1]:
            for i in range(num_res_blocks + 1):
                ich = input_block_chans.pop()
                layers = [
                    ResBlock(
                        ch + ich,
                        time_embed_dim,
                        dropout,
                        out_channels=model_channels * mult,
                        dims=dims,
                        use_checkpoint=use_checkpoint,
                        use_scale_shift_norm=use_scale_shift_norm,
                        time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
                    )
                ]
                ch = model_channels * mult
                if ds in attention_resolutions:
                    if num_head_channels == -1:
                        dim_head = ch // num_heads
                    else:
                        num_heads = ch // num_head_channels
                        dim_head = num_head_channels
                    if legacy:
                        #num_heads = 1
                        dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
                    layers.append(
                        AttentionBlock(
                            ch,
                            use_checkpoint=use_checkpoint,
                            num_heads=num_heads_upsample,
                            num_head_channels=dim_head,
                            use_new_attention_order=use_new_attention_order,
                        ) if not use_spatial_transformer else SpatialTransformer(
                            ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim
                        )
                    )
                if level and i == num_res_blocks:
                    out_ch = ch
                    layers.append(
                        ResBlock(
                            ch,
                            time_embed_dim,
                            dropout,
                            out_channels=out_ch,
                            dims=dims,
                            use_checkpoint=use_checkpoint,
                            use_scale_shift_norm=use_scale_shift_norm,
                            up=True,
                            time_cond_channels=time_cond_dim if self.bio_time_embed is not None else None,
                        )
                        if resblock_updown
                        else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)
                    )
                    ds //= 2
                self.output_blocks.append(TimestepEmbedSequential(*layers))
                self._feature_size += ch

        self.out = nn.Sequential(
            normalization(ch),
            nn.SiLU(),
            # Fix: use `ch` (== model_channels * channel_mult[0]) as the input
            # width; the previous hard-coded `model_channels` only matched
            # when channel_mult starts at 1.
            zero_module(nn.Conv2d(ch, out_channels, 3, padding=1)),
        )
        if self.predict_codebook_ids:
            self.id_predictor = nn.Sequential(
                normalization(ch),
                nn.Conv2d(ch, self.out_channels, 1),
                #nn.LogSoftmax(dim=1)  # change to cross_entropy and produce non-normalized logits
            )

    def _build_semantic_encoder(self):
        """Build semantic encoder network - can be VAE or simple CNN"""
        if getattr(self, 'use_vae_encoder', False):
            # Use VAE encoder if specified
            from .vae_encoder import VAEEncoder
            encoder = VAEEncoder(
                in_channels=self.in_channels,
                latent_dim=self.sem_cond_dim,
                base_channels=64,
                channel_multipliers=[1, 2, 4, 8],
                num_res_blocks=2,
                dropout=0.1,
                use_attention=True,
                beta=1e-3,
            )
            return encoder
        else:
            # Simple encoder: Conv layers + Global pool + FC
            encoder_layers = []

            # Initial conv layer
            in_ch = self.in_channels
            base_ch = 64

            # Downsampling layers
            for i, mult in enumerate([1, 2, 4, 8]):
                out_ch = base_ch * mult
                encoder_layers.extend([
                    nn.Conv2d(in_ch, out_ch, 3, stride=2, padding=1),
                    nn.GroupNorm(8, out_ch),
                    nn.SiLU(),
                ])
                in_ch = out_ch

            # Global average pooling
            encoder_layers.append(nn.AdaptiveAvgPool2d(1))

            # Final layers
            encoder = nn.Sequential(*encoder_layers)

            # Project to semantic dimension (registered as a submodule here,
            # used by encode_semantic).
            self.sem_proj = nn.Sequential(
                nn.Linear(in_ch, self.sem_cond_dim * 2),
                nn.SiLU(),
                nn.Linear(self.sem_cond_dim * 2, self.sem_cond_dim),
            )

            return encoder

    def encode_semantic(self, x):
        """Encode input Hi-C matrix to semantic features"""
        if self.semantic_encoder is None:
            raise ValueError("Semantic encoder not enabled. Set use_semantic_encoder=True")

        if getattr(self, 'use_vae_encoder', False):
            # VAE encoder returns z, mu, logvar
            z, mu, logvar = self.semantic_encoder(x)
            return z if self.training else mu  # Use z during training, mu during inference
        else:
            # Simple encoder: forward, flatten the pooled map, then project
            features = self.semantic_encoder(x)  # [B, C, 1, 1]
            features = features.view(features.shape[0], -1)  # [B, C]

            # Project to semantic space
            sem_features = self.sem_proj(features)  # [B, sem_cond_dim]

            return sem_features

    def load_vae_encoder(self, vae_encoder_path, freeze_encoder=True):
        """Load pre-trained VAE encoder"""
        if not self.use_semantic_encoder:
            raise ValueError("Semantic encoder not enabled")

        # Use the module-level `th` alias (the local `import torch` is
        # unnecessary).
        encoder_checkpoint = th.load(vae_encoder_path, map_location='cpu')

        if hasattr(self, 'semantic_encoder') and hasattr(self.semantic_encoder, 'load_state_dict'):
            # Load VAE encoder weights
            self.semantic_encoder.load_state_dict(encoder_checkpoint['encoder_state_dict'])

            # Freeze encoder weights if specified
            if freeze_encoder:
                for param in self.semantic_encoder.parameters():
                    param.requires_grad = False
                print(f"VAE encoder loaded and frozen from {vae_encoder_path}")
            else:
                print(f"VAE encoder loaded (trainable) from {vae_encoder_path}")
        else:
            raise ValueError("Cannot load VAE encoder: semantic encoder not properly initialized")

    def get_semantic_loss(self, x):
        """Get semantic regularization loss (KL loss for VAE)"""
        # Fix: this method referenced `torch`, which is never bound at module
        # scope (the module imports torch as `th`), raising NameError.
        if not getattr(self, 'use_vae_encoder', False) or self.semantic_encoder is None:
            return th.tensor(0.0, device=x.device)

        with th.set_grad_enabled(self.training):
            z, mu, logvar = self.semantic_encoder(x)
            kl_loss = self.semantic_encoder.kl_loss(mu, logvar)
            return kl_loss

    def convert_to_fp16(self):
        """
        Convert the torso of the model to float16.
        """
        self.input_blocks.apply(convert_module_to_f16)
        self.middle_block.apply(convert_module_to_f16)
        self.output_blocks.apply(convert_module_to_f16)

    def convert_to_fp32(self):
        """
        Convert the torso of the model to float32.
        """
        self.input_blocks.apply(convert_module_to_f32)
        self.middle_block.apply(convert_module_to_f32)
        self.output_blocks.apply(convert_module_to_f32)

    def forward(self, x, timesteps=None, context=None, y=None, bio_time=None, sem_cond=None, **kwargs):
        """
        Apply the model to an input batch.

        :param x: an [N x C x ...] Tensor of inputs.
        :param timesteps: a 1-D batch of timesteps.
        :param context: conditioning plugged in via crossattn
        :param y: an [N] Tensor of labels, if class-conditional.
        :param bio_time: biological time for interpolation, range [0, 1]
        :param sem_cond: semantic conditioning features [N x sem_cond_dim]
        :return: an [N x C x ...] Tensor of outputs.
        """
        assert (y is not None) == (
            self.num_classes is not None
        ), "must specify y if and only if the model is class-conditional"

        hs = []
        t_emb = timestep_embedding(timesteps, self.model_channels)
        emb = self.time_embed(t_emb)

        # Biological time conditioning
        bio_time_emb = None
        if bio_time is not None and self.bio_time_embed is not None:
            if self.use_transition_anchors:
                # Mix learnable early/late anchors using stage alpha in [0,1]
                if bio_time.dim() == 0:
                    bio_time = bio_time.unsqueeze(0)
                alpha = bio_time.view(-1, 1)
                anchor_mix = (1.0 - alpha) * self.transition_anchor_early[None, :] + alpha * self.transition_anchor_late[None, :]
                bio_input = anchor_mix
            else:
                # Sinusoidal embedding of scalar stage if anchors are disabled
                bio_input = timestep_embedding(bio_time * 1000, 128)
            bio_time_emb = self.bio_time_embed(bio_input)
            # Classifier-free conditioning dropout during training
            if self.training and self.cond_drop_prob > 0.0:
                drop = (th.rand(bio_time_emb.shape[0], device=bio_time_emb.device) < self.cond_drop_prob).float().view(-1, 1)
                bio_time_emb = bio_time_emb * (1.0 - drop)

        # Semantic conditioning.  Fix: the x_start fallback was nested inside
        # an `if sem_cond is not None` guard, making it unreachable dead code;
        # hoist it so features are encoded when none were supplied.
        if sem_cond is None and self.use_semantic_encoder and self.semantic_encoder is not None and 'x_start' in kwargs:
            sem_cond = self.encode_semantic(kwargs['x_start'])

        if sem_cond is not None and self.sem_cond_proj is not None:
            sem_emb = self.sem_cond_proj(sem_cond)

            # Classifier-free conditioning dropout during training
            if self.training and self.sem_drop_prob > 0.0:
                drop_mask = (th.rand(sem_emb.shape[0], device=sem_emb.device) < self.sem_drop_prob).float().view(-1, 1)
                sem_emb = sem_emb * (1.0 - drop_mask)

            # Add semantic conditioning to main embedding
            emb = emb + sem_emb

        if self.num_classes is not None:
            assert y.shape == (x.shape[0],)
            emb = emb + self.label_emb(y)

        h = x.type(self.dtype)
        for module in self.input_blocks:
            h = module(h, emb, bio_time_emb)
            hs.append(h)
        h = self.middle_block(h, emb, bio_time_emb)
        for module in self.output_blocks:
            h = th.cat([h, hs.pop()], dim=1)
            h = module(h, emb, bio_time_emb)

        h = h.type(x.dtype)
        if self.predict_codebook_ids:
            return self.id_predictor(h)
        else:
            return self.out(h)

# Backwards-compatibility flag read by UNetModel.__init__ when configuring
# attention head dimensions.  It is defined after the class body, which works
# because the global is only looked up at construction time.
legacy = False


