import torch
import torch.nn as nn
from quantize.quantizer import create_quantizer
from quantize.base import QuantModuleNoLinear

try:
    from liger_kernel.transformers.rope import liger_rotary_pos_emb as apply_rotary_pos_emb
    # apply_rotary_pos_emb = torch.compiler.disable(apply_rotary_pos_emb)
except ImportError:
    from transformers.models.llama.modeling_llama import apply_rotary_pos_emb

class ApplyRotaryPosEmb(nn.Module):
    """Stateless ``nn.Module`` wrapper over the imported ``apply_rotary_pos_emb``.

    Exposing the function as a module allows it to be swapped for a
    quantized replacement (see ``QuantApplyRotaryPosEmb`` below) via
    standard module-substitution machinery. It holds no parameters or
    buffers, so the default ``nn.Module.__init__`` suffices.
    """

    def forward(
        self, q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
    ):
        # Pure delegation — whichever backend (liger kernel or the
        # transformers reference implementation) was imported at module load.
        return apply_rotary_pos_emb(q, k, cos, sin)


class QuantApplyRotaryPosEmb(QuantModuleNoLinear):
    """Rotary position embedding with optional activation quantization.

    When ``use_act_quant`` is enabled, the query and key tensors are each
    quantized (with independent quantizers) before the rotary embedding is
    applied; otherwise the tensors pass through unchanged.
    """

    def __init__(
        self,
        quant_params: "dict | None" = None,
    ):
        """Build the module.

        Args:
            quant_params: keyword arguments forwarded to ``create_quantizer``
                for both the query and key quantizers. ``None`` (the default)
                means "use the quantizer's own defaults". A ``None`` sentinel
                replaces the original mutable-default ``{}``, which is shared
                across all calls of the function.
        """
        super().__init__(ApplyRotaryPosEmb())
        self.use_act_quant = False
        params = {} if quant_params is None else quant_params
        self.query_quantizer = create_quantizer(**params)
        self.key_quantizer = create_quantizer(**params)

    def f(
        self, q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
    ):
        """Optionally quantize q/k, then apply rotary position embeddings.

        Args:
            q: query tensor laid out as (batch, num_heads, seq, head_dim) —
               implied by the transpose/flatten/unflatten round-trip below.
            k: key tensor with the same layout (its head count may differ,
               e.g. grouped-query attention).
            cos, sin: rotary embedding tables passed through to
               ``apply_rotary_pos_emb``.

        Returns:
            Whatever ``apply_rotary_pos_emb`` returns for the (possibly
            quantized) inputs.
        """
        if self.use_act_quant:
            # Quantizers operate on a (batch, seq, heads*head_dim) view;
            # flatten heads for quantization, then restore the original
            # layout. Head count / head dim are read from the tensors
            # themselves rather than the previous hard-coded (32, 128),
            # so any head configuration works. q and k are handled
            # separately because GQA keys can have fewer heads.
            q_heads, q_dim = q.shape[1], q.shape[-1]
            q = (
                self.query_quantizer(q.transpose(1, 2).flatten(-2))
                .unflatten(-1, (q_heads, q_dim))
                .transpose(1, 2)
            )
            k_heads, k_dim = k.shape[1], k.shape[-1]
            k = (
                self.key_quantizer(k.transpose(1, 2).flatten(-2))
                .unflatten(-1, (k_heads, k_dim))
                .transpose(1, 2)
            )
        out = apply_rotary_pos_emb(q, k, cos, sin)
        return out
