import math
from copy import deepcopy
from typing import Callable

import torch
from torch import Tensor, nn

from comfy.ldm.flux.layers import (
    DoubleStreamBlock,
    Modulation,
    SelfAttention,
    apply_mod,
    attention,
)
from comfy.ldm.flux.model import Flux
from comfy.model_base import ModelType
from comfy.model_patcher import ModelPatcher


def get_emb(sin_inp):
    """Interleave sin and cos of the given angles along the last axis.

    For an input of shape (..., d), returns a tensor of shape (..., 2*d)
    whose trailing dimension alternates sin/cos per frequency.
    """
    interleaved = torch.stack([torch.sin(sin_inp), torch.cos(sin_inp)], dim=-1)
    return interleaved.flatten(-2, -1)


class PositionalEncoding1D(nn.Module):
    def __init__(self, channels: int, device: torch.device = None):
        """
        Sinusoidal 1D positional encoding (sin/cos interleaved per frequency).

        :param channels: The last dimension of the tensor you want to apply pos emb to.
        :param device: Optional device for the frequency buffer. Defaults to the
            framework's default device (CPU) instead of hard-coding CUDA, which
            crashed on CPU-only installs; ``forward`` moves the buffer to the
            input's device on demand, so any value here is safe.
        """
        super().__init__()
        self.org_channels = channels
        # Round up to an even count so sin/cos pairs fill the embedding exactly.
        channels = int(math.ceil(channels / 2) * 2)
        self.channels = channels
        inv_freq = 1.0 / (
            10000 ** (torch.arange(0, channels, 2, device=device).float() / channels)
        )
        self.register_buffer("inv_freq", inv_freq)
        # Non-persistent cache of the last computed encoding.
        self.register_buffer("cached_penc", None, persistent=False)

    def forward(self, tensor):
        """
        :param tensor: A 3d tensor of size (batch_size, x, ch)
        :return: Positional Encoding Matrix of size (batch_size, x, ch)
        :raises RuntimeError: if the input is not 3-dimensional.
        """
        if len(tensor.shape) != 3:
            raise RuntimeError("The input tensor has to be 3d!")

        # Reuse the cache only when it matches the input's shape AND
        # device/dtype — the input may move between devices at runtime,
        # and a shape-only check would hand back a mismatched tensor.
        if (
            self.cached_penc is not None
            and self.cached_penc.shape == tensor.shape
            and self.cached_penc.device == tensor.device
            and self.cached_penc.dtype == tensor.dtype
        ):
            return self.cached_penc

        self.cached_penc = None
        batch_size, x, orig_ch = tensor.shape
        # Compute on the input's device. This module may never be moved via
        # Module.to() (e.g. when held in a closure), so the buffer can live on
        # a different device than the input; .to() is a no-op when they match.
        inv_freq = self.inv_freq.to(tensor.device)
        pos_x = torch.arange(x, device=tensor.device, dtype=inv_freq.dtype)
        sin_inp_x = torch.einsum("i,j->ij", pos_x, inv_freq)
        emb_x = get_emb(sin_inp_x)
        emb = torch.zeros((x, self.channels), device=tensor.device, dtype=tensor.dtype)
        emb[:, : self.channels] = emb_x

        # Trim the padding channel (when org_channels is odd) and broadcast
        # over the batch dimension.
        self.cached_penc = emb[None, :, :orig_ch].repeat(batch_size, 1, 1)
        return self.cached_penc


def make_forward_fn(self: DoubleStreamBlock, hidden_size: int) -> Callable:
    """Build a replacement ``forward`` for one Flux ``DoubleStreamBlock``.

    The returned closure reproduces the block's dual-stream (img/txt)
    attention, with one change (CoMPaSS/TENOR): the text stream's Q and K are
    recomputed from the modulated text with a sinusoidal 1D positional
    encoding added, while V is kept from the un-encoded text.

    :param self: the block being patched; the closure captures it, so the
        result can be assigned directly as the block's ``forward``.
    :param hidden_size: channel width used to size the positional encoding.
    """
    # NOTE(review): PositionalEncoding1D defaults its internal buffer to CUDA;
    # presumably txt tensors are CUDA here — confirm on CPU/MPS backends.
    tenor_pe = PositionalEncoding1D(hidden_size)

    def compass_double_stream_block_forward(
        img: Tensor,
        txt: Tensor,
        vec: Tensor,
        pe: Tensor,
        attn_mask=None,
        modulation_dims_img=None,
        modulation_dims_txt=None,
    ):
        # Per-stream modulation parameters derived from the conditioning vector.
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare image for attention
        img_modulated = self.img_norm1(img)
        img_modulated = apply_mod(
            img_modulated, (1 + img_mod1.scale), img_mod1.shift, modulation_dims_img
        )
        img_qkv = self.img_attn.qkv(img_modulated)
        # Split fused QKV: (3, B, heads, seq, head_dim).
        img_q, img_k, img_v = img_qkv.view(
            img_qkv.shape[0], img_qkv.shape[1], 3, self.num_heads, -1
        ).permute(2, 0, 3, 1, 4)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare txt for attention
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = apply_mod(
            txt_modulated, (1 + txt_mod1.scale), txt_mod1.shift, modulation_dims_txt
        )
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        # initial q and k are discarded
        _, _, txt_v = txt_qkv.view(
            txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1
        ).permute(2, 0, 3, 1, 4)

        # CoMPaSS' single important change:
        # Override Q and K with Q and K adding PE, do not change V.
        # The original implementation:
        #   <https://github.com/blurgyy/CoMPaSS/blob/4b51c28eb15943bd4eafe1437d114f90c1bb4705/TENOR/flux/src/flux/modules/layers.py#L275-L283>
        txt_modulated_with_pe = txt_modulated + tenor_pe(txt_modulated)
        # Second qkv pass over the PE-augmented text; only Q/K are kept.
        txt_qkv_with_pe = self.txt_attn.qkv(txt_modulated_with_pe)
        txt_q, txt_k, _ = txt_qkv_with_pe.view(
            txt_qkv.shape[0], txt_qkv.shape[1], 3, self.num_heads, -1
        ).permute(2, 0, 3, 1, 4)
        # CoMPaSS operation ends

        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # Concatenation order of the two streams depends on the model variant.
        if self.flipped_img_txt:
            # run actual attention
            attn = attention(
                torch.cat((img_q, txt_q), dim=2),
                torch.cat((img_k, txt_k), dim=2),
                torch.cat((img_v, txt_v), dim=2),
                pe=pe,
                mask=attn_mask,
            )

            img_attn, txt_attn = attn[:, : img.shape[1]], attn[:, img.shape[1] :]
        else:
            # run actual attention
            attn = attention(
                torch.cat((txt_q, img_q), dim=2),
                torch.cat((txt_k, img_k), dim=2),
                torch.cat((txt_v, img_v), dim=2),
                pe=pe,
                mask=attn_mask,
            )

            txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1] :]

        # calculate the img blocks
        img = img + apply_mod(
            self.img_attn.proj(img_attn), img_mod1.gate, None, modulation_dims_img
        )
        img = img + apply_mod(
            self.img_mlp(
                apply_mod(
                    self.img_norm2(img),
                    (1 + img_mod2.scale),
                    img_mod2.shift,
                    modulation_dims_img,
                )
            ),
            img_mod2.gate,
            None,
            modulation_dims_img,
        )

        # calculate the txt blocks
        # NOTE: += updates the caller's txt tensor in place (unlike the img
        # branch above, which rebinds) — matches the upstream block's style.
        txt += apply_mod(
            self.txt_attn.proj(txt_attn), txt_mod1.gate, None, modulation_dims_txt
        )
        txt += apply_mod(
            self.txt_mlp(
                apply_mod(
                    self.txt_norm2(txt),
                    (1 + txt_mod2.scale),
                    txt_mod2.shift,
                    modulation_dims_txt,
                )
            ),
            txt_mod2.gate,
            None,
            modulation_dims_txt,
        )

        # Clamp non-finite values to the float16 representable range.
        if txt.dtype == torch.float16:
            txt = torch.nan_to_num(txt, nan=0.0, posinf=65504, neginf=-65504)

        return img, txt

    return compass_double_stream_block_forward


class CoMPaSSForFlux:
    """ComfyUI node that patches every DoubleStreamBlock of a FLUX model to
    use the CoMPaSS (TENOR) forward pass."""

    def __init__(self):
        pass

    @classmethod
    def INPUT_TYPES(cls):
        return {
            "required": {
                "model": ("MODEL",),
            },
        }

    RETURN_TYPES = ("MODEL",)
    FUNCTION = "convert"
    CATEGORY = "Conditioning"

    def check_lazy_status(self, model):
        # The model input is always needed.
        return [model]

    def convert(self, model: ModelPatcher):
        """Return a patched deep copy of *model*; the input is left untouched.

        :raises ValueError: if *model* is not a FLUX model.
        """
        is_flux = model.model.model_type == ModelType.FLUX and isinstance(
            model.model.diffusion_model, Flux
        )
        if not is_flux:
            raise ValueError("This node expects a FLUX model")

        # Deep-copy so the forward override below never touches the original.
        patched = deepcopy(model)

        for block in patched.model.diffusion_model.double_blocks:
            if not isinstance(block, DoubleStreamBlock):
                continue
            block.forward = make_forward_fn(block, block.hidden_size)

        return (patched,)
