import copy
from types import MethodType
import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.base import QuantModule
from quantize.conv2d import QuantConv2d
from quantize.gelu import QuantGELU
from quantize.int_linear import QuantLinear
from quantize.int_matmul import QuantMatMul
from quantize.layernorm import QuantLayerNorm
from quantize.silu_ewm import QuantFusedSiLU
from quantize.rope import QuantApplyRotaryPosEmb
from quantize.rmsnorm import QuantRMSNorm
from quantize.softmax import QuantSoftmax

from quantize.config import QuantizationConfigArgs

from quantize.models.modeling_vit import (
    MLP,
    Attntion,
    LayerScale,
    Block,
    VLAProjector,
    ViT,
    MLPLinear,
)

from transformers.models.llama.modeling_llama import (
    LlamaMLP,
    LlamaAttention,
    LlamaDecoderLayer,
    LlamaModel,
    repeat_kv,
)
from typing import Callable, Iterator, List, Optional, Set, Tuple, Union
from liger_kernel.transformers.rope import liger_rotary_pos_emb as apply_rotary_pos_emb
from liger_kernel.transformers.swiglu import LigerSiLUMulFunction


class OriginBlock:
    """Non-registering wrapper around an original (full-precision) module.

    Because this is a plain object (not an ``nn.Module``), assigning it as an
    attribute of a quantized block keeps the wrapped module out of that
    block's submodule registry, parameters, and state_dict, while calls and
    attribute reads still pass through to the wrapped module.
    """

    def __init__(self, org_module: nn.Module):
        self.org_module = org_module

    def __call__(self, *args, **kwds):
        # Delegate calls straight to the wrapped module.
        return self.org_module(*args, **kwds)

    def __getattr__(self, name):
        # __getattr__ only runs after normal attribute lookup fails, so
        # delegation is all that is needed here.  (The previous
        # `super.__getattr__(name)` accessed the attribute on the builtin
        # `super` *type* itself, which always raised AttributeError and made
        # that branch dead code.)
        # Guard against infinite recursion when `org_module` is not yet set
        # (e.g. during unpickling / deepcopy before __init__ has run).
        org = self.__dict__.get("org_module")
        if org is None:
            raise AttributeError(name)
        return getattr(org, name)

class QuantBlock(nn.Module):
    """Marker base class for composite quantized blocks.

    Carries no behavior of its own; it exists so already-quantized blocks
    can be recognized via isinstance checks (e.g. during module replacement)
    and skipped.
    """

    pass


class QuantLlamaMLP(QuantBlock):
    """Quantized replacement for `LlamaMLP` (SwiGLU feed-forward).

    Wraps the gate/up/down projections in `QuantLinear` and replaces the
    SiLU-multiply with a quantized fused operator.  The original FP module
    stays reachable through `org_mlp` without being registered as a
    submodule.
    """

    def __init__(self, org_mlp: LlamaMLP, quant_args):
        super().__init__()
        # FP reference, kept out of the submodule registry via OriginBlock.
        self.org_mlp = OriginBlock(org_mlp)

        self.gate_proj = QuantLinear(
            org_mlp.gate_proj,
            quant_args.weight_quant_params,
            quant_args.mlp_act_quant_params,
        )
        # The up projection deliberately shares the gate projection's
        # activation quantizer (both see the same input activations).
        self.up_proj = QuantLinear(
            org_mlp.up_proj,
            quant_args.weight_quant_params,
            act_quantizer=self.gate_proj.act_quantizer,
        )
        self.down_proj = QuantLinear(
            org_mlp.down_proj,
            quant_args.weight_quant_params,
            quant_args.mlp_act_quant_params,
        )

        self.fused_silu = QuantFusedSiLU(
            sigmoid_quant_params=quant_args.sigmoid_quant_params,
            hadamard2_a1_quant_params=quant_args.hadamard2_a1_quant_params,
            hadamard1_a2_quant_params=quant_args.hadamard1_a2_quant_params,
            hadamard2_act_quant_params=quant_args.hadamard2_act_quant_params,
        )

    def forward(self, x):
        """Quantized SwiGLU: down_proj(fused_silu(up_proj(x), gate_proj(x)))."""
        up_act = self.up_proj(x)
        gate_act = self.gate_proj(x)
        hidden = self.fused_silu(up_act, gate_act)
        return self.down_proj(hidden)

    def forward_linear(self, x):
        """Alternative path using the Liger fused SiLU-mul kernel instead of
        the quantized fused operator."""
        activated = LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x))
        return self.down_proj(activated)


def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: Optional[torch.Tensor],
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    """Reference (non-fused) scaled-dot-product attention.

    Expands grouped KV heads via `repeat_kv`, adds the (sliced) additive
    mask, softmaxes in float32, applies dropout, and returns
    `(attn_output, attn_weights)` with the output transposed to
    (batch, seq, heads, head_dim).
    """
    k_full = repeat_kv(key, module.num_key_value_groups)
    v_full = repeat_kv(value, module.num_key_value_groups)

    scores = torch.matmul(query, k_full.transpose(2, 3)) * scaling
    if attention_mask is not None:
        # Slice the mask down to the actual key length before adding.
        scores = scores + attention_mask[:, :, :, : k_full.shape[-2]]

    # Softmax in float32 for numerical stability, then cast back.
    probs = nn.functional.softmax(scores, dim=-1, dtype=torch.float32).to(query.dtype)
    probs = nn.functional.dropout(probs, p=dropout, training=module.training)

    out = torch.matmul(probs, v_full).transpose(1, 2).contiguous()
    return out, probs


class QuantLlamaAttn(QuantBlock):
    """Quantized drop-in replacement for `LlamaAttention`.

    The original q/k/v projections are fused into a single bias-free
    `qkv_proj` linear (the separate projections are then deleted), and every
    linear, matmul, softmax and rotary embedding on the attention path is
    replaced by its quantized counterpart.  The original module's `forward`
    is rebound to `forward_linear`, which runs the fused projection through
    a standard SDPA kernel.
    """

    def __init__(self, org_attn: LlamaAttention, quant_args):
        super().__init__()
        # FP reference kept as a plain attribute (OriginBlock keeps it out
        # of this module's submodule registry / state_dict).
        self.org_attn = OriginBlock(org_attn)
        self.config = org_attn.config
        self.layer_idx = org_attn.layer_idx
        self.head_dim = org_attn.head_dim
        self.num_key_value_groups = org_attn.num_key_value_groups
        self.scaling = org_attn.scaling

        # Per-projection output widths; also used to split the fused output
        # back into q / k / v in forward.
        self.qkv_hidden_dim = (
            org_attn.q_proj.weight.data.shape[0],
            org_attn.k_proj.weight.data.shape[0],
            org_attn.v_proj.weight.data.shape[0],
        )
        org_attn.qkv_hidden_dim = self.qkv_hidden_dim

        # Fused bias-free qkv projection.  Sized as q_out + k_out + v_out:
        # the previous `3 * q_proj.out_features` was only correct when q/k/v
        # share an output dim (no grouped-query attention); under GQA the
        # weight copy below would fail on a size mismatch.
        org_attn.qkv_proj = torch.nn.Linear(
            org_attn.q_proj.in_features, sum(self.qkv_hidden_dim), False
        ).cuda()

        org_attn.qkv_proj.weight.data.copy_(
            torch.cat(
                [
                    org_attn.q_proj.weight.data.cuda(),
                    org_attn.k_proj.weight.data.cuda(),
                    org_attn.v_proj.weight.data.cuda(),
                ],
                dim=0,
            )
        )

        # Llama attention projections carry no bias; bail out if a checkpoint
        # ever does, since the fused layer above is created bias-free.
        assert org_attn.q_proj.bias is None, org_attn.layer_idx

        self.qkv_proj = QuantLinear(
            org_attn.qkv_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )

        # Quantized q @ k^T, softmax, and P @ v operators.
        self.qkt_mm = QuantMatMul(quant_args.q_quant_params, quant_args.k_quant_params)
        self.softmax = QuantSoftmax(quant_args.softmax_input_params)
        self.pv_mm = QuantMatMul(quant_args.p_quant_params, quant_args.v_quant_params)

        self.o_proj = QuantLinear(
            org_attn.o_proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )

        self.apply_rotary_pos_emb = QuantApplyRotaryPosEmb(
            quant_args.pos_emb_quant_params
        )

        # The separate projections are now redundant: free their weights and
        # make the *original* module run the fused fast path (forward_linear
        # only touches attributes that still exist on org_attn).
        del org_attn.q_proj
        del org_attn.k_proj
        del org_attn.v_proj
        org_attn.forward = MethodType(QuantLlamaAttn.forward_linear, org_attn)

    def forward(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Fully-quantized attention path (no KV-cache support).

        Returns `(attn_output, None)` to match the HF attention interface.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        # One fused projection, then split back into q / k / v.
        query_states, key_states, value_states = torch.split(
            self.qkv_proj(hidden_states), self.qkv_hidden_dim, dim=-1
        )

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = self.apply_rotary_pos_emb(
            query_states, key_states, cos, sin
        )

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)

        attn_weights = (
            self.qkt_mm(query_states, key_states.transpose(2, 3)) * self.scaling
        )

        if attention_mask is not None:
            causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
        else:
            # No mask supplied: build an additive causal mask (-inf above the
            # diagonal) matching the (q_len, k_len) score shape.
            causal_mask = torch.zeros(
                attn_weights.shape[-2:],
                dtype=attn_weights.dtype,
                device=attn_weights.device,
            )
            keep = torch.ones(
                attn_weights.shape[-2:],
                dtype=torch.bool,
                device=attn_weights.device,
            ).tril_(diagonal=0)
            causal_mask.masked_fill_(keep.logical_not(), float("-inf"))

        attn_weights = self.softmax(attn_weights, causal_mask)
        attn_output = self.pv_mm(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)

        return (attn_output, None)

    def forward_linear(
        self,
        hidden_states: torch.Tensor,
        position_embeddings: Tuple[torch.Tensor, torch.Tensor],
        attention_mask: Optional[torch.Tensor],
        past_key_value: Optional[torch.Tensor] = None,
        cache_position: Optional[torch.LongTensor] = None,
        **kwargs,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
        """Non-fake-quant path: fused qkv projection + standard SDPA kernel.

        Also bound (via MethodType in __init__) as `forward` of the original
        attention module, which received the fused `qkv_proj` there.
        """
        input_shape = hidden_states.shape[:-1]
        hidden_shape = (*input_shape, -1, self.head_dim)

        query_states, key_states, value_states = torch.split(
            self.qkv_proj(hidden_states), self.qkv_hidden_dim, dim=-1
        )

        query_states = query_states.view(hidden_shape).transpose(1, 2)
        key_states = key_states.view(hidden_shape).transpose(1, 2)
        value_states = value_states.view(hidden_shape).transpose(1, 2)

        cos, sin = position_embeddings
        query_states, key_states = apply_rotary_pos_emb(
            query_states, key_states, cos, sin
        )

        key_states = repeat_kv(key_states, self.num_key_value_groups)
        value_states = repeat_kv(value_states, self.num_key_value_groups)
        # Fall back to SDPA's built-in causal masking when no mask is given.
        use_causal: bool = attention_mask is None

        attn_output = nn.functional.scaled_dot_product_attention(
            query_states.contiguous(),
            key_states.contiguous(),
            value_states.contiguous(),
            attn_mask=(
                attention_mask[:, :, :, : key_states.shape[-2]]
                if attention_mask is not None
                else None
            ),
            is_causal=use_causal,
        )

        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.reshape(*input_shape, -1).contiguous()
        attn_output = self.o_proj(attn_output)
        return (attn_output, None)


class QuantLlamaDecoderLayer(QuantBlock):
    """Quantized replacement for a `LlamaDecoderLayer`.

    Pre-norm attention and MLP sublayers are swapped for their quantized
    counterparts.  When `diff_loss` is enabled, `forward_linear` also runs
    the original FP layer on the same input and stores the MSE between the
    two outputs in `self.loss`.
    """

    def __init__(self, org_layer: LlamaDecoderLayer, quant_args, diff_loss=False):
        super().__init__()
        # FP reference, kept out of the submodule registry via OriginBlock.
        self.org_layer = OriginBlock(org_layer)

        self.hidden_size = org_layer.hidden_size

        self.self_attn = QuantLlamaAttn(org_layer.self_attn, quant_args)
        self.mlp = QuantLlamaMLP(org_layer.mlp, quant_args)
        self.input_layernorm = QuantRMSNorm(
            org_layer.input_layernorm, quant_args.layernorm_input_quant_params
        )
        self.post_attention_layernorm = QuantRMSNorm(
            org_layer.post_attention_layernorm, quant_args.layernorm_input_quant_params
        )
        self.diff_loss = diff_loss
        # Filled by forward_linear when diff_loss is enabled.
        self.loss: torch.Tensor | None = None

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[
            Tuple[torch.Tensor, torch.Tensor]
        ] = None,  # necessary, but kept here for BC
        **kwargs,
    ) -> Tuple[
        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
    ]:
        """Fully-quantized layer: pre-norm attention + pre-norm MLP with residuals."""
        # The quantized path cannot serve a KV cache.
        use_cache = False
        assert past_key_value is None or past_key_value.__len__() == 0, (
            f"Can't handle kvcache in quant model! Receive {past_key_value=} {past_key_value.__len__()}"
        )
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states, output_residual=False)

        # Self attention sublayer.
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # MLP sublayer.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(
            hidden_states, output_residual=False
        )
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)
        return outputs

    def forward_linear(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_value: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = False,
        cache_position: Optional[torch.LongTensor] = None,
        position_embeddings: Optional[
            Tuple[torch.Tensor, torch.Tensor]
        ] = None,  # necessary, but kept here for BC
        **kwargs,
    ) -> Tuple[
        torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]
    ]:
        """Non-fake-quant layer path; optionally records the quant-vs-FP MSE."""
        use_cache = False
        assert past_key_value is None or past_key_value.__len__() == 0, (
            f"Can't handle kvcache in quant model! Receive {past_key_value=} {past_key_value.__len__()}"
        )
        # Keep the layer input around so the diff-loss below can feed the FP
        # layer the same input this layer received.
        org_hidden_state = hidden_states
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)

        # Self attention sublayer.
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_value=past_key_value,
            output_attentions=output_attentions,
            use_cache=use_cache,
            cache_position=cache_position,
            position_embeddings=position_embeddings,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        # MLP sublayer.
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights,)

        if self.diff_loss:
            # BUGFIX: compare against the FP layer run on the layer *input*
            # (org_hidden_state).  Previously the quantized layer's *output*
            # was fed back into org_layer, making the loss meaningless.
            self.loss = torch.nn.functional.mse_loss(
                self.org_layer(
                    hidden_states=org_hidden_state,
                    attention_mask=attention_mask,
                    position_ids=position_ids,
                    past_key_value=past_key_value,
                    output_attentions=output_attentions,
                    use_cache=use_cache,
                    cache_position=cache_position,
                    position_embeddings=position_embeddings,  # necessary, but kept here for BC
                    **kwargs,
                )[0],
                outputs[0],
            )

        return outputs


class QuantVitAttn(QuantBlock):
    """Quantized ViT multi-head self-attention block.

    Quantizes the fused qkv projection, the q @ k^T and P @ v matmuls, the
    softmax, and the output projection of the original `Attntion` module.
    """

    def __init__(self, org_attn: Attntion, quant_args):
        super().__init__()

        # FP reference, kept out of the submodule registry via OriginBlock.
        self.org_attn = OriginBlock(org_attn)
        self.num_heads = org_attn.num_heads
        self.head_dim = org_attn.head_dim
        self.scale = org_attn.scale

        self.qkv = QuantLinear(
            org_attn.qkv,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )

        self.proj = QuantLinear(
            org_attn.proj,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        )

        self.qkt_mm = QuantMatMul(quant_args.q_quant_params, quant_args.k_quant_params)
        self.softmax = QuantSoftmax(quant_args.softmax_input_params)
        self.pv_mm = QuantMatMul(quant_args.p_quant_params, quant_args.v_quant_params)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Fully-quantized attention over (B, N, C) token embeddings."""
        B, N, C = x.shape
        # (B, N, 3*C) -> (3, B, heads, N, head_dim), then split into q/k/v.
        qkv_state = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, self.head_dim)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv_state.unbind(0)

        attn = self.qkt_mm(q, k.transpose(-2, -1))
        attn = self.softmax(attn * self.scale, attn_mask=None)

        x = self.pv_mm(attn, v)
        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)

        return x

    def forward_linear(self, x: torch.Tensor) -> torch.Tensor:
        """Non-fake-quant path using the fused SDPA kernel."""
        # Single shape unpack (was duplicated in the original).
        B, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B, N, 3, self.num_heads, self.head_dim)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv.unbind(0)

        # NOTE(review): SDPA applies its default 1/sqrt(head_dim) scale here;
        # confirm org_attn.scale matches, otherwise pass scale=self.scale.
        x = nn.functional.scaled_dot_product_attention(q, k, v)

        x = x.transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)

        return x


class VitDiffLossWrapper(nn.Module):
    """Pairs a frozen full-precision module with its quantized counterpart.

    `forward` runs the quantized module (`self._m[1]`) and, under
    `torch.no_grad()`, the FP reference (`self._m[0]`) on the same inputs,
    storing the MSE between the two outputs in `self.diff_loss`.  Module,
    parameter, buffer and state-dict access are proxied to the quantized
    module so the wrapper acts as a drop-in replacement for it.
    """

    def __init__(
        self,
        org_module: torch.nn.Module,
        quant_module: torch.nn.Module,
        trainable=True,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        # self.register_buffer("diff_loss",torch.tensor(0.0,dtype=torch.bfloat16),persistent=False)
        # MSE between the quantized and FP outputs of the latest forward.
        self.diff_loss = 0
        # Plain list keeps both modules out of the wrapper's own submodule
        # registry: index 0 = FP reference, index 1 = quantized module.
        self._m = [
            org_module.bfloat16().cuda(),
            quant_module.bfloat16().cuda(),
        ]
        # Share the quantized module's registries so nn.Module attribute
        # resolution on the wrapper finds the quantized submodules/params.
        self._modules = quant_module._modules
        self._parameters = quant_module._parameters
        self._buffers = quant_module._buffers
        self._trainable = trainable
        # self.blocks = getattr(quant_module, "blocks",None)
        # Freeze the FP reference: re-register every parameter as a
        # non-persistent buffer so it neither trains nor enters state_dict.
        for name, p in tuple(self._m[0].named_parameters()):
            m_names = name.split(".")
            m = self._m[0]
            # Walk down to the submodule that owns this parameter.
            for n in m_names[:-1]:
                if hasattr(m, n):
                    m = getattr(m, n)
            # if not isinstance(m,QuantLinear):
            n = m_names[-1] if len(m_names) > 1 else name
            # print(f"del {m} {n}")
            # logger.info(f"del {name} {n}")
            delattr(m, n)
            p.data.requires_grad = False
            m.register_buffer(n, p.data.bfloat16(), persistent=False)

    def forward(self, *args, **kwargs):
        """Run the quantized module; also record the quant-vs-FP MSE."""
        if self._trainable:
            out_q = self._m[1](*args, **kwargs)
        else:
            with torch.no_grad():
                out_q = self._m[1](*args, **kwargs).detach()
        # NOTE(review): the trailing `or 1` forces this branch on every call,
        # including eval — looks like a debugging override; confirm intent.
        if (self.training and self._trainable) or 1:
            with torch.no_grad():
                out_o = self._m[0](*args, **kwargs)
                # out_o = self._m[0](
                #     *tuple(
                #         [
                #             (v.bfloat16().cuda() if torch.is_tensor(v) else v)
                #             for v in args
                #         ]
                #     ),
                #     **{
                #         k: (v.bfloat16().cuda() if torch.is_tensor(v) else v)
                #         for k, v in kwargs.items()
                #     },
                # )
            # if not torch.is_tensor(out_q):
            #     # out_q = out_q.hidden_states[0]
            #     # out_o = out_o.hidden_states[0]
            #     self.diff_loss = self.diff_loss + torch.nn.functional.mse_loss(
            #         out_q.hidden_states[0], out_o.hidden_states[0].cuda()
            #     )
            # else:
            # Reset, then record this call's MSE (FP output moved to CUDA).
            self.diff_loss = 0.0
            self.diff_loss = self.diff_loss + torch.nn.functional.mse_loss(
                out_q, out_o.cuda()
            )
            # torch.cuda.empty_cache()
        return out_q

    def __getattr__(self, name: str):
        """Attribute proxy.

        Called when an attribute is not found on the wrapper itself; the
        lookup is then forwarded to the quantized module (when trainable).
        """
        try:
            return super().__getattr__(name)  # Try accessing the attribute normally
        except AttributeError:
            # Avoid infinite recursion during serialization (e.g. pickle).
            if name.startswith("_") or not self._trainable:
                raise AttributeError(
                    f"'{type(self).__name__}' object has no attribute '{name}'"
                )
            try:
                # Try to fetch the attribute from the quantized module.
                return getattr(self._m[1], name)
            except AttributeError:
                # The quantized module lacks it too: raise a standard error.
                raise AttributeError(
                    f"'{type(self).__name__}' object and its 'quant_module' have no attribute '{name}'"
                )

    def __delattr__(self, name):
        # Deletions of public attributes are forwarded to the quantized module.
        try:
            return super().__delattr__(name)
        except AttributeError:
            if name.startswith("_"):
                raise
            else:
                return self._m[1].__delattr__(name)

    def state_dict(self, *args, **kwargs):
        """Return the quantized module's state_dict.

        Saved checkpoints therefore contain only the quantized model's
        parameters and buffers (the FP reference is excluded).
        """
        # Forward all arguments to the real state_dict implementation.
        return self._m[1].state_dict(*args, **kwargs)

    def load_state_dict(self, state_dict, strict=True):
        """Load the state dict directly into the quantized module."""
        # Forward the state dict and `strict` to the real implementation.
        return self._m[1].load_state_dict(state_dict, strict=strict)

    # ---- 2. Full proxying of the module hierarchy and parameters ----

    def register_buffer(self, name, tensor, persistent=True):
        return self._m[1].register_buffer(name, tensor, persistent)

    def parameters(self, recurse: bool = True) -> Iterator[nn.Parameter]:
        return self._m[1].parameters(recurse)

    def named_parameters(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, nn.Parameter]]:
        return self._m[1].named_parameters(prefix, recurse)

    def buffers(self, recurse: bool = True) -> Iterator[torch.Tensor]:
        return self._m[1].buffers(recurse)

    def named_buffers(
        self, prefix: str = "", recurse: bool = True
    ) -> Iterator[Tuple[str, torch.Tensor]]:
        return self._m[1].named_buffers(prefix, recurse)

    def children(self) -> Iterator["nn.Module"]:
        return self._m[1].children()

    def named_children(self) -> Iterator[Tuple[str, "nn.Module"]]:
        return self._m[1].named_children()

    # `modules` and `named_modules` need special handling: the wrapper itself
    # is included, while its children come from the quantized module.
    def modules(self) -> Iterator["nn.Module"]:
        # First yield the wrapper itself,
        yield self
        # then all submodules of the quantized module (but not the quantized
        # module object itself).
        for module in self._m[1].children():
            yield from module.modules()

    def named_modules(
        self,
        memo: Optional[Set] = None,
        prefix: str = "",
        remove_duplicate: bool = True,
    ):
        # Mirrors nn.Module.named_modules over the quantized module's tree.
        # NOTE(review): this yields the quantized module at `prefix` (not the
        # wrapper, unlike `modules()` above) — confirm that asymmetry is
        # intended.
        if memo is None:
            memo = set()
        if self not in memo:
            if remove_duplicate:
                memo.add(self)
            yield prefix, self._m[1]
            # Recurse into the quantized module's children.
            for name, module in self._m[1].named_children():
                if module is not None:
                    submodule_prefix = prefix + ("." if prefix else "") + name
                    yield from module.named_modules(
                        memo, submodule_prefix, remove_duplicate
                    )

    def __repr__(self):
        # Print like the quantized module, with a wrapper banner attached.
        main_repr = self._m[1].__repr__()
        return f"VitDiffLossWrapper (wrapping):\n{main_repr}"


def replace_module(model: nn.Module, replace_map: dict):
    """Recursively swap submodules of `model` in place according to `replace_map`.

    `replace_map` maps module classes to factory callables.  Matching uses an
    exact `type(child) is cls` test (no subclass matching), already-quantized
    modules are skipped, and containers are only recursed into when they are
    not themselves a mapped type.  Returns the mutated `model`.
    """
    for name, child in reversed(model._modules.items()):
        has_children = len(list(child.children())) > 0
        is_mapped_instance = any(isinstance(child, cls) for cls in replace_map)
        if has_children and not is_mapped_instance:
            model._modules[name] = replace_module(child, replace_map)

        for cls, factory in replace_map.items():
            if type(child) is cls and not isinstance(child, (QuantModule, QuantBlock)):
                model._modules[name] = factory(child)
    return model


def quant_llama_model(
    model: LlamaModel, quant_args: QuantizationConfigArgs, use_diff_loss=False
):
    """Quantize a LlamaModel in place.

    Replaces every decoder layer with `QuantLlamaDecoderLayer` and the final
    norm with `QuantRMSNorm`.  When `use_diff_loss` is set, a full-precision
    deep copy is taken first and both models are returned inside a
    `VitDiffLossWrapper` that tracks the quant-vs-FP output MSE.
    """
    # The quantized layers cannot serve a KV cache.
    model.config.use_cache = False

    def _quantize(m):
        m.layers = nn.ModuleList(
            QuantLlamaDecoderLayer(layer, quant_args) for layer in m.layers
        )
        m.norm = QuantRMSNorm(
            m.norm,
            input_quant_params=quant_args.layernorm_input_quant_params,
            output_quant_params=quant_args.layernorm_output_quant_params,
        )
        return m

    if not use_diff_loss:
        return _quantize(model)

    # Copy before quantizing: `_quantize` mutates `model` in place.
    reference = copy.deepcopy(model)
    return VitDiffLossWrapper(reference, _quantize(model), trainable=True)


def quant_vit(model: ViT, quant_args: QuantizationConfigArgs, use_diff_loss=False):
    """Quantize a ViT in place by swapping supported module types.

    Attention blocks, linears, convs, layernorms and GELUs are replaced via
    `replace_module`.  When `use_diff_loss` is set, a full-precision deep
    copy is taken first and both models are returned inside a
    `VitDiffLossWrapper` that tracks the quant-vs-FP output MSE.
    """
    conversions = {
        Attntion: lambda m: QuantVitAttn(m, quant_args),
        nn.Linear: lambda m: QuantLinear(
            m,
            quant_args.weight_quant_params,
            quant_args.act_quant_params,
        ),
        nn.Conv2d: lambda m: QuantConv2d(
            m, quant_args.conv_weight_quant_params, quant_args.act_quant_params
        ),
        nn.LayerNorm: lambda m: QuantLayerNorm(
            m, quant_args.layernorm_vit_input_quant_params
        ),
        MLPLinear: lambda m: QuantLinear(
            m,
            quant_args.weight_quant_params,
            quant_args.mlp_act_quant_params,
        ),
        nn.GELU: lambda m: QuantGELU(
            m,
            quant_args.gelu_input_params,
            quant_args.sigmoid_quant_params,
            quant_args.hadamard1_a2_quant_params,
        ),
    }

    if not use_diff_loss:
        return replace_module(model, conversions)

    # Copy before replacing: `replace_module` mutates `model` in place.
    reference = copy.deepcopy(model)
    quantized = replace_module(model, conversions)
    return VitDiffLossWrapper(reference, quantized, trainable=True)


def quant_proj(
    model: VLAProjector, quant_args: QuantizationConfigArgs, use_diff_loss=False
):
    """Quantize a VLAProjector in place (linears and GELUs only).

    When `use_diff_loss` is set, a full-precision deep copy is taken first
    and both models are returned inside a `VitDiffLossWrapper` that tracks
    the quant-vs-FP output MSE.
    """
    conversions = {
        nn.Linear: lambda m: QuantLinear(
            m,
            quant_args.weight_quant_params,
            quant_args.mlp_act_quant_params,
        ),
        nn.GELU: lambda m: QuantGELU(m, quant_args.gelu_input_params),
    }

    if not use_diff_loss:
        return replace_module(model, conversions)

    # Copy before replacing: `replace_module` mutates `model` in place.
    reference = copy.deepcopy(model)
    quantized = replace_module(model, conversions)
    return VitDiffLossWrapper(reference, quantized, trainable=True)
