from typing import Any, List, Optional, Tuple, Union
from types import MethodType

import torch
import torch.nn as nn
from transformers.models.llama.modeling_llama import (
    LlamaModel,
    LlamaDecoderLayer,
    LlamaForCausalLM,
    LlamaAttention,
    repeat_kv,
)
from transformers.modeling_outputs import (
    CausalLMOutputWithPast,
    BaseModelOutputWithPast,
)
from transformers.cache_utils import DynamicCache
from liger_kernel.transformers.model.llama import lce_maybe_trainable_lm_head
from liger_kernel.transformers.functional import LigerSiLUMulFunction

from ...blocks import (
    QuantBlock,
    QuantLlamaDecoderLayer,
    QuantLlamaMLP,
    QuantLlamaAttn,
    OriginBlock,
)
from ...rmsnorm import QuantRMSNorm
from ...int_matmul import QuantMatMul
from ...softmax import QuantSoftmax
from ...int_linear import QuantLinear
from ...rope import QuantApplyRotaryPosEmb
from ...utils.prepare_model import compile_layers, set_module_names
from .hadamard_utils import random_hadamard_matrix
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")


class RotateModule(nn.Module):
    """Trainable rotation matrix wrapper for SpinQuant R1/R2 rotations.

    The rotation is stored as a float32 CUDA parameter so it can be
    optimized during rotation training.
    """

    def __init__(self, R_init):
        super().__init__()
        # Keep the rotation in float32 on GPU regardless of the input dtype.
        self.weight = nn.Parameter(R_init.to(torch.float32).to(torch.device("cuda")))

    def forward(self, x, transpose=False):
        """Apply the rotation: ``x @ R`` when transpose=True, else ``R @ x``."""
        return x @ self.weight if transpose else self.weight @ x


class SpinQuantLlamaAttn(QuantLlamaAttn):
    """Quantized Llama attention with a fused qkv projection and SpinQuant rotations.

    ``R1`` is the global hidden-size rotation shared across the model; ``R2`` is
    an optional per-layer head-dim rotation attached externally (see ``add_R2``).
    KV cache is unsupported: ``past_key_value`` / ``cache_position`` are accepted
    for interface compatibility but ignored.
    """

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        R1=None,
        **kwargs,
    ):
        # (batch, seq) prefix of the activation shape; head count inferred via -1.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # R2 only exists after add_R2() has attached it to this module.
        has_R2 = hasattr(self, "R2") and self.R2 is not None

        # NOTE(review): self.qkv_proj / self.qkv_hidden_dim are assumed to come
        # from the QuantLlamaAttn base — a fused q/k/v linear plus the per-slice
        # output widths used for the split. The R2 value-slice offset
        # `2 * qkv_hidden_dim[0]` assumes q and k slices share the same width —
        # confirm this holds for GQA configs where k is narrower than q.
        query_states, key_states, value_states = torch.split(
            self.qkv_proj(
                hidden_states,
                R1=R1,
                R2=(self.R2.weight if has_R2 else None),
                R2_v_start_dim=(2 * self.qkv_hidden_dim[0] if has_R2 else None),
            ),
            self.qkv_hidden_dim,
            dim=-1,
        )

        # Reshape to (batch, heads, seq, head_dim) for attention.
        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = self.apply_rotary_pos_emb(
            query_states, key_states, cos, sin
        )

        # Expand KV heads to match query heads (grouped-query attention).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        # Use the causal fast path only when no explicit mask is provided and
        # there is more than one query position.
        is_causal = attention_mask is None and query_states.shape[2] > 1

        # NOTE(review): no `scale=` is passed, so SDPA defaults to
        # 1/sqrt(head_dim); if the base class carries a different scaling it is
        # not applied here — confirm intended.
        attn_output = nn.functional.scaled_dot_product_attention(
            query_states.contiguous(),
            key_states.contiguous(),
            value_states.contiguous(),
            attn_mask=(
                attention_mask[:, :, :, : key_states.shape[-2]]
                if attention_mask is not None
                else None
            ),
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()

        # Merge heads back to (batch, seq, heads * head_dim).
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        # NOTE(review): transpose=True presumably applies R1 in its transposed
        # (inverse) direction inside QuantLinear — verify against QuantLinear.
        attn_output = self.o_proj(
            attn_output, R1=R1, R2=(self.R2.weight if has_R2 else None), transpose=True
        )
        return (attn_output, None)


class SpinQuantQKVLlamaAttn(QuantBlock):
    """Quantized Llama attention with separate q/k/v QuantLinear projections and
    SpinQuant rotations (global ``R1``; optional per-layer head-dim ``R2`` on
    the value/output path).

    KV cache is unsupported: ``past_key_value`` / ``cache_position`` are
    accepted for interface compatibility but ignored.
    """

    def __init__(self, org_attn: LlamaAttention, quant_args):
        # QuantModuleNoLinear.__init__(self,org_attn, quant_args)
        super().__init__()

        # Keep the original (fp) attention around for fallback/calibration.
        self.org_attn = OriginBlock(org_attn)
        self.config = org_attn.config
        self.layer_idx = org_attn.layer_idx
        self.head_dim = org_attn.head_dim
        self.num_key_value_groups = org_attn.num_key_value_groups
        # NOTE(review): scaling is stored but not passed to SDPA in forward().
        self.scaling = org_attn.scaling

        self.q_proj = QuantLinear(
            org_attn.q_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )
        self.k_proj = QuantLinear(
            org_attn.k_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )
        # NOTE(review): qkt_mm / softmax / pv_mm are constructed but unused by
        # this class's forward (it calls SDPA directly) — presumably exercised
        # by an alternative forward path or calibration; confirm before removing.
        self.qkt_mm = QuantMatMul(quant_args.q_quant_params, quant_args.k_quant_params)

        self.softmax = QuantSoftmax(quant_args.softmax_input_params)

        self.v_proj = QuantLinear(
            org_attn.v_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )
        self.pv_mm = QuantMatMul(quant_args.p_quant_params, quant_args.v_quant_params)

        self.o_proj = QuantLinear(
            org_attn.o_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )

        # Quantized RoPE application shared by q and k.
        self.apply_rotary_pos_emb = QuantApplyRotaryPosEmb(
            quant_args.pos_emb_quant_params
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        R1=None,
        **kwargs,
    ):
        # (batch, seq) prefix of the activation shape; head count inferred via -1.
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # R2 only exists after add_R2() has attached it to this module.
        has_R2 = hasattr(self, "R2") and self.R2 is not None

        query_states = self.q_proj(hidden_states, R1=R1)
        key_states = self.k_proj(hidden_states, R1=R1)
        # Only the value path carries the head-dim rotation R2.
        value_states = self.v_proj(
            hidden_states, R1=R1, R2=(self.R2.weight if has_R2 else None)
        )

        # Reshape to (batch, heads, seq, head_dim) for attention.
        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = self.apply_rotary_pos_emb(
            query_states, key_states, cos, sin
        )

        # Expand KV heads to match query heads (grouped-query attention).
        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        # Use the causal fast path only when no explicit mask is provided and
        # there is more than one query position.
        is_causal = attention_mask is None and query_states.shape[2] > 1

        # NOTE(review): no `scale=` is passed, so SDPA defaults to
        # 1/sqrt(head_dim); self.scaling is not applied here — confirm intended.
        attn_output = nn.functional.scaled_dot_product_attention(
            query_states.contiguous(),
            key_states.contiguous(),
            value_states.contiguous(),
            attn_mask=(
                attention_mask[:, :, :, : key_states.shape[-2]]
                if attention_mask is not None
                else None
            ),
            is_causal=is_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()

        # Merge heads back to (batch, seq, heads * head_dim).
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        # NOTE(review): transpose=True presumably applies R1 in its transposed
        # (inverse) direction inside QuantLinear — verify against QuantLinear.
        attn_output = self.o_proj(
            attn_output, R1=R1, R2=(self.R2.weight if has_R2 else None), transpose=True
        )
        return (attn_output, None)


class SpinQuantLlamaMLP(QuantLlamaMLP):
    """Quantized Llama MLP that threads the SpinQuant R1 rotation through its
    up/gate/down projections."""

    def forward(self, x, *, R1=None):
        """Compute ``down(silu(gate(x)) * up(x))`` with R1 applied inside the
        quantized linears (and undone on the down projection via transpose)."""
        up_out = self.up_proj(x, R1=R1)
        gate_out = self.gate_proj(x, R1=R1)
        # Fused SiLU(gate) * up via the Liger kernel; equivalent to
        # nn.functional.silu(gate_out) * up_out.
        fused = LigerSiLUMulFunction.apply(gate_out, up_out)
        # transpose=True applies R1 in the inverse direction on the output side.
        return self.down_proj(fused, R1=R1, transpose=True)


class SpinQuantLlamaDecoderLayer(QuantLlamaDecoderLayer):
    """Llama decoder layer with SpinQuant attention/MLP and quantized RMSNorms.

    The original floating-point layer is retained in ``self.org_layer`` so the
    quant wrappers can reference it (e.g. for fallback or calibration).
    """

    def __init__(self, org_layer: LlamaDecoderLayer, quant_args):
        # Deliberately skip QuantLlamaDecoderLayer.__init__: this layer builds
        # its own SpinQuant submodules instead of the parent's quant blocks.
        nn.Module.__init__(self)
        self.org_layer = OriginBlock(org_layer)

        self.hidden_size = org_layer.hidden_size

        self.self_attn = SpinQuantQKVLlamaAttn(org_layer.self_attn, quant_args)

        self.mlp = SpinQuantLlamaMLP(org_layer.mlp, quant_args)
        self.input_layernorm = QuantRMSNorm(
            org_layer.input_layernorm, quant_args.layernorm_input_quant_params
        )
        self.post_attention_layernorm = QuantRMSNorm(
            org_layer.post_attention_layernorm, quant_args.layernorm_input_quant_params
        )

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[
            Tuple[torch.Tensor, torch.Tensor]
        ] = None,  # necessary, but kept here for BC
        R1=None,
        **kwargs,
    ) -> Tuple[
        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
    ]:
        """Standard pre-norm transformer block with R1 forwarded to attn/MLP.

        KV-cache is unsupported in the quantized model: ``use_cache`` is forced
        off and a non-empty ``past_key_value`` is rejected.
        """
        use_cache = False
        # NOTE: assert is stripped under `python -O`; kept to match file style.
        assert past_key_value is None or past_key_value.__len__() == 0, (
            f"Can't handle kvcache in quant model! Receive {past_key_value=} {past_key_value.__len__()}"
        )
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            R1=R1,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states

        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states, R1=R1)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs


def forward_with_R1_lm(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Union[Any, List[torch.FloatTensor]]] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    labels: Optional[torch.LongTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    logits_to_keep: Union[int, torch.Tensor] = 0,
    **kwargs,
) -> Union[Tuple, Any]:
    """Replacement ``LlamaForCausalLM.forward`` that threads the SpinQuant R1
    rotation through the decoder and un-rotates the final hidden states.

    Bound onto the model instance by ``add_R1``. The interface matches the
    stock HF forward; ``logits_to_keep`` restricts logit computation to the
    last N positions (or to an explicit index tensor).
    """
    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )

    # R1 may be missing entirely (model built without add_R1) or set to None;
    # resolve it once so every use below is guarded consistently. (Previously
    # the second use did a bare `self.R1`, raising AttributeError when absent.)
    R1 = getattr(self, "R1", None)

    # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
    outputs = self.model(
        input_ids=input_ids,
        attention_mask=attention_mask,
        position_ids=position_ids,
        past_key_values=past_key_values,
        inputs_embeds=inputs_embeds,
        use_cache=use_cache,
        output_attentions=output_attentions,
        output_hidden_states=output_hidden_states,
        return_dict=return_dict,
        cache_position=cache_position,
        R1=(R1.weight if R1 is not None else None),
        **kwargs,
    )

    hidden_states = outputs[0]
    if R1 is not None:
        # Undo the global rotation (multiply by R1^T) in float64 for accuracy,
        # then cast back to the activation dtype.
        dtype = hidden_states.dtype
        hidden_states = (
            hidden_states.to(torch.float64) @ R1.weight.T.to(torch.float64)
        ).to(dtype)
    # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
    slice_indices = (
        slice(-logits_to_keep, None)
        if isinstance(logits_to_keep, int)
        else logits_to_keep
    )
    kept_hidden_states = hidden_states[:, slice_indices, :]

    shift_labels = kwargs.pop("shift_labels", None)
    logits = None
    loss = None
    # if in training mode, don't materialize logits (fused linear + CE loss)
    if self.training and (labels is not None or shift_labels is not None):
        loss = lce_maybe_trainable_lm_head(
            self,
            hidden_states=kept_hidden_states,
            hidden_size=self.config.hidden_size,
            labels=labels,
            shift_labels=shift_labels,
            **kwargs,
        )

    else:  # if in inference mode materialize logits
        logits = self.lm_head(kept_hidden_states)
        if labels is not None:
            loss = self.loss_function(
                logits=logits,
                labels=labels,
                vocab_size=self.config.vocab_size,
                **kwargs,
            )

    if not return_dict:
        output = (logits,) + outputs[1:]
        return (loss,) + output if loss is not None else output

    return CausalLMOutputWithPast(
        loss=loss,
        logits=logits,
        past_key_values=outputs.past_key_values,
        hidden_states=outputs.hidden_states,
        attentions=outputs.attentions,
    )


def forward_with_R1_model(
    self,
    input_ids: torch.LongTensor = None,
    attention_mask: Optional[torch.Tensor] = None,
    position_ids: Optional[torch.LongTensor] = None,
    past_key_values: Optional[Any] = None,
    inputs_embeds: Optional[torch.FloatTensor] = None,
    use_cache: Optional[bool] = None,
    output_attentions: Optional[bool] = None,
    output_hidden_states: Optional[bool] = None,
    return_dict: Optional[bool] = None,
    cache_position: Optional[torch.LongTensor] = None,
    R1=None,
    **flash_attn_kwargs: Any,
) -> Union[Tuple, BaseModelOutputWithPast]:
    """Replacement ``LlamaModel.forward`` that rotates the input embeddings by
    R1 and forwards R1 to every decoder layer.

    Bound onto ``model.model`` by ``add_R1``. Here ``R1`` is the raw rotation
    weight tensor (not the RotateModule); when None this matches the stock HF
    forward.
    """
    output_attentions = (
        output_attentions
        if output_attentions is not None
        else self.config.output_attentions
    )
    output_hidden_states = (
        output_hidden_states
        if output_hidden_states is not None
        else self.config.output_hidden_states
    )
    use_cache = use_cache if use_cache is not None else self.config.use_cache
    return_dict = (
        return_dict if return_dict is not None else self.config.use_return_dict
    )

    if (input_ids is None) ^ (inputs_embeds is not None):
        raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

    if self.gradient_checkpointing and self.training and use_cache:
        logger.warning_once(
            "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
        )
        use_cache = False

    if inputs_embeds is None:
        inputs_embeds = self.embed_tokens(input_ids)

    # Rotate embeddings into the R1 basis; matmul in float64 for accuracy,
    # then cast back to the original activation dtype.
    if R1 is not None:
        dtype = inputs_embeds.dtype
        inputs_embeds = (inputs_embeds.to(torch.float64) @ R1.to(torch.float64)).to(
            dtype
        )
    if use_cache and past_key_values is None:
        past_key_values = DynamicCache()

    if cache_position is None:
        past_seen_tokens = (
            past_key_values.get_seq_length() if past_key_values is not None else 0
        )
        cache_position = torch.arange(
            past_seen_tokens,
            past_seen_tokens + inputs_embeds.shape[1],
            device=inputs_embeds.device,
        )

    if position_ids is None:
        position_ids = cache_position.unsqueeze(0)

    causal_mask = self._update_causal_mask(
        attention_mask,
        inputs_embeds,
        cache_position,
        past_key_values,
        output_attentions,
    )

    hidden_states = inputs_embeds

    # create position embeddings to be shared across the decoder layers
    position_embeddings = self.rotary_emb(hidden_states, position_ids)

    # decoder layers
    all_hidden_states = () if output_hidden_states else None
    all_self_attns = () if output_attentions else None

    for decoder_layer in self.layers[: self.config.num_hidden_layers]:
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        if self.gradient_checkpointing and self.training:
            # NOTE(review): R1 is passed as a keyword through the checkpointing
            # wrapper — assumes _gradient_checkpointing_func forwards kwargs
            # (non-reentrant checkpoint path); confirm against the HF version in use.
            layer_outputs = self._gradient_checkpointing_func(
                decoder_layer.__call__,
                hidden_states,
                causal_mask,
                position_ids,
                past_key_values,
                output_attentions,
                use_cache,
                cache_position,
                position_embeddings,
                R1=R1,
            )
        else:
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                position_ids=position_ids,
                past_key_value=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                cache_position=cache_position,
                position_embeddings=position_embeddings,
                R1=R1,
                **flash_attn_kwargs,
            )

        hidden_states = layer_outputs[0]

        if output_attentions:
            all_self_attns += (layer_outputs[1],)

    hidden_states = self.norm(hidden_states)

    # add hidden states from the last decoder layer
    if output_hidden_states:
        all_hidden_states += (hidden_states,)

    output = BaseModelOutputWithPast(
        last_hidden_state=hidden_states,
        past_key_values=past_key_values if use_cache else None,
        hidden_states=all_hidden_states,
        attentions=all_self_attns,
    )
    return output if return_dict else output.to_tuple()


def add_R1(model: LlamaForCausalLM):
    """Attach a trainable global rotation R1 and bind R1-aware forwards.

    Replaces ``model.forward`` and ``model.model.forward`` on these specific
    instances so the rotation is threaded through the whole causal-LM path.
    """
    R1_init: torch.Tensor = random_hadamard_matrix(model.config.hidden_size, "cuda")
    model.R1 = RotateModule(R1_init)

    # Bind the replacement forwards as instance methods.
    model.forward = MethodType(forward_with_R1_lm, model)
    model.model.forward = MethodType(forward_with_R1_model, model.model)
    return model


def add_R2(model: LlamaForCausalLM):
    """Attach a trainable per-layer head-dim rotation R2 to every attention block.

    Each layer receives its own freshly sampled random Hadamard rotation of
    size ``hidden_size // num_attention_heads``.
    """
    head_dim = model.config.hidden_size // model.config.num_attention_heads
    for layer in model.model.layers:
        # Sample a new rotation per layer (not shared across layers).
        layer.self_attn.R2 = RotateModule(random_hadamard_matrix(head_dim, "cuda"))
    return model


def spin_quant_llama_model(model: LlamaForCausalLM, quant_args):
    """Swap a Llama causal LM's decoder layers and final norm for SpinQuant variants.

    Also monkey-patches the shared QuantLlamaAttn / QuantLlamaDecoderLayer
    classes: their default ``forward`` becomes the linear (quant-friendly)
    path, with the original preserved under ``_forward``. This is a
    class-level change affecting every instance.
    """
    QuantLlamaAttn._forward = QuantLlamaAttn.forward
    QuantLlamaAttn.forward = QuantLlamaAttn.forward_linear
    QuantLlamaDecoderLayer._forward = QuantLlamaDecoderLayer.forward
    QuantLlamaDecoderLayer.forward = QuantLlamaDecoderLayer.forward_linear

    # The quantized layers cannot serve a KV cache.
    model.model.config.use_cache = False

    # Wrap every decoder layer with its SpinQuant counterpart.
    model.model.layers = nn.ModuleList(
        SpinQuantLlamaDecoderLayer(layer, quant_args) for layer in model.model.layers
    )

    model.model.norm = QuantRMSNorm(
        model.model.norm,
        input_quant_params=quant_args.layernorm_input_quant_params,
        output_quant_params=quant_args.layernorm_output_quant_params,
    )

    return model


def build_spin_train_blocks(model: nn.Module, qat_args, compile_train=True):
    """Prepare the SpinQuant trainable model.

    Quantizes the language model, freezes all existing weights, attaches the
    R1/R2 rotation parameters (added after the freeze, so they keep
    ``requires_grad=True``), and switches every QuantLinear to online
    quantization.

    Args:
        model: Wrapper module exposing ``model.language_model`` (a LlamaForCausalLM).
        qat_args: Quantization-argument bundle passed down to the quant blocks.
        compile_train: If True, compile the layers for training.

    Returns:
        The prepared model, moved to CUDA.
    """
    logger.info("build spin quant llama blocks for train")

    model.language_model = spin_quant_llama_model(model.language_model, qat_args)
    set_module_names(model)

    # Freeze everything that exists so far; rotations added below stay trainable.
    for p in model.parameters():
        p.requires_grad = False

    logger.info("add spin matrix R1")
    model.language_model = add_R1(model.language_model)

    logger.info("add spin matrix R2")
    model.language_model = add_R2(model.language_model)

    for name, m in model.named_modules():
        if isinstance(m, QuantLinear):
            m.online_quant = True
            # Fix: log inside the branch — previously this fired for every
            # module, not just the linears actually switched to online quant.
            logger.debug(f"make linear {name} online_quant")

    if compile_train:
        model = compile_layers(model)
    return model.cuda()
