import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer, LSQPlusQuantizer
from quantize.base import QuantModuleLinear
import os
import torch.utils.checkpoint
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")

# dtype in which online rotations (R1/R2 matmuls) are performed for numerical
# stability; weights are cast back to their parameter dtype afterwards.
r_dtype = torch.float32
# Optional global matmul precision knob, deliberately left disabled:
# torch.set_float32_matmul_precision("high")


class QuantLinear(QuantModuleLinear, nn.Linear):
    """
    Quantized module that can perform quantized or normal linear
    (matrix-multiply) forward passes.
    To activate quantization, please use the set_quant_state function.
    """

    def __init__(
        self,
        org_module: nn.Linear,
        weight_quant_params: dict | None = None,
        act_quant_params: dict | None = None,
        act_quantizer: LSQPlusQuantizer | None = None,
    ):
        """Wrap ``org_module`` with weight/activation quantizers.

        Args:
            org_module: the original ``nn.Linear``; its ``weight``/``bias``
                parameters are shared (not copied).
            weight_quant_params: kwargs forwarded to ``create_quantizer`` for
                the weight quantizer (``None`` means defaults).
            act_quant_params: kwargs forwarded to ``create_quantizer`` for the
                activation quantizer; ignored when ``act_quantizer`` is given.
            act_quantizer: optional pre-built activation quantizer, e.g. one
                shared across several layers.
        """
        QuantModuleLinear.__init__(self, org_module)

        # Use None sentinels instead of mutable {} defaults (shared across
        # calls); normalize to empty dicts here.
        weight_quant_params = weight_quant_params if weight_quant_params is not None else {}
        act_quant_params = act_quant_params if act_quant_params is not None else {}

        # Share the original parameters rather than cloning them, so QAT
        # updates flow back into the wrapped module.
        self.weight = org_module.weight
        self.bias = org_module.bias

        self.in_features = org_module.in_features
        self.out_features = org_module.out_features
        # De-activate the quantized forward by default.
        self.use_weight_quant = False
        self.use_act_quant = False
        # Initialize quantizers: weights quantized along the output-channel
        # dimension, activations along the input-channel dimension.
        self.weight_quantizer = create_quantizer(
            **weight_quant_params, channel_dim=self.out_features
        )
        self.act_quantizer = (
            act_quantizer
            if act_quantizer is not None
            else create_quantizer(**act_quant_params, channel_dim=self.in_features)
        )
        # If the wrapped module exposes a precomputed activation scale, build
        # a divide-out hook; otherwise pre_scale is a no-op.
        # NOTE(review): pre_scale is currently not applied anywhere in the
        # visible forward paths — confirm whether it is used by callers.
        if hasattr(self.org_module[0], "act_scale"):
            self.pre_scale = lambda x: x.div(self.org_module[0].act_scale)
        else:
            self.pre_scale = nn.Identity()

    def quant_weight(self):
        """Return the fake-quantized weight when weight quantization is
        enabled, otherwise the raw shared weight."""
        if self.use_weight_quant:
            return self.weight_quantizer(self.weight)
        return self.weight

    def f(
        self,
        input: torch.Tensor,
        **kwargs,
    ):
        """Fake-quantized linear forward.

        Optionally quantizes the activation (when ``use_act_quant`` is set and
        an activation quantizer exists), then applies ``linear`` with the
        (possibly quantized) weight and the shared bias.
        """
        weight_dequant = self.quant_weight()
        bias = self.bias

        if self.use_act_quant and self.act_quantizer is not None:
            input = self.act_quantizer(input)

        return F.linear(input, weight_dequant, bias)

    def online_f(
        self,
        input: torch.Tensor,
        *,
        R1=None,
        R2=None,
        transpose=False,
        R2_v_start_dim=None,
        **kwargs,
    ):
        """Linear forward with online rotations folded into the weight, then
        fast integer quantization of both weight and activation.

        Args:
            input: activation tensor.
            R1: optional global rotation — ``W @ R1`` (or ``R1.T @ W`` when
                ``transpose`` is set), computed in ``r_dtype`` for stability.
            R2: optional blockwise (per-head) rotation; ``R2.shape[0]`` must
                divide the rotated dimension.
            transpose: selects which side R1/R2 are applied on.
            R2_v_start_dim: currently unused; kept for interface
                compatibility with callers.
        """
        if R1 is not None:
            dtype = self.weight.dtype
            if not transpose:
                weight = self.weight.to(r_dtype) @ R1.to(r_dtype)
            else:
                weight = R1.T.to(r_dtype) @ self.weight.to(r_dtype)
            if R2 is not None:
                # Blockwise rotation: reshape into (..., n_blocks, had_dim)
                # and rotate each had_dim-sized block independently.
                had_dim = R2.shape[0]
                if transpose:
                    init_shape = weight.shape
                    temp = weight.reshape(-1, init_shape[-1] // had_dim, had_dim)
                    temp = temp.to(r_dtype) @ R2.to(r_dtype)
                    weight = temp.reshape(init_shape)
                else:
                    W_ = weight.t()
                    transposed_shape = W_.shape
                    temp = W_.reshape(-1, transposed_shape[-1] // had_dim, had_dim)
                    temp = temp.to(r_dtype) @ R2.to(r_dtype)
                    weight = temp.reshape(transposed_shape).t()
            # Cast back to the parameter dtype exactly once (the previous
            # version redundantly converted twice when R2 was None).
            weight = weight.to(dtype)
        else:
            weight = self.weight

        # NOTE(review): unlike f(), this path assumes act_quantizer is never
        # None — confirm that assumption holds for all online callers.
        w = self.weight_quantizer.fast_int_time_quant(weight)
        x = self.act_quantizer.fast_int_time_quant(input)
        return F.linear(x, w, self.bias)
