import os
from typing import Optional

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint

from quantize.base import QuantModuleNoLinear
from quantize.quantizer import create_quantizer


class QuantConv2d(QuantModuleNoLinear):
    """
    Quantized Module that can perform quantized convolution or normal convolution.
    To activate quantization, please use set_quant_state function.
    """

    def __init__(
        self,
        org_module: nn.Conv2d,
        weight_quant_params: Optional[dict] = None,
        act_quant_params: Optional[dict] = None,
    ):
        """Wrap an existing ``nn.Conv2d`` for (optionally) quantized execution.

        Args:
            org_module: the convolution whose parameters and geometry are copied.
            weight_quant_params: kwargs forwarded to ``create_quantizer`` for the
                weight quantizer (``None`` means no extra kwargs).
            act_quant_params: kwargs forwarded to ``create_quantizer`` for the
                activation quantizer (``None`` means no extra kwargs).
        """
        super().__init__(org_module)

        # Resolve mutable-default anti-pattern: build fresh dicts per call
        # instead of sharing a module-level {} across all instances.
        weight_quant_params = weight_quant_params or {}
        act_quant_params = act_quant_params or {}

        # Carry over the FULL conv geometry. The previous version copied only
        # stride and padding, silently mis-computing dilated or grouped convs;
        # dilation/groups default to 1, so plain convs behave identically.
        self.stride = org_module.stride
        self.padding = org_module.padding
        self.dilation = org_module.dilation
        self.groups = org_module.groups

        # Keep parameters as non-persistent bfloat16 buffers: they are not
        # trained through this wrapper and are excluded from the state_dict.
        self.register_buffer(
            "weight", org_module.weight.data.to(torch.bfloat16), persistent=False
        )

        if org_module.bias is not None:
            self.register_buffer(
                "bias", org_module.bias.data.to(torch.bfloat16), persistent=False
            )
        else:
            self.bias = None

        # de-activate the quantized forward by default
        self.use_weight_quant = False
        self.use_act_quant = False
        # initialize quantizers
        self.weight_quantizer = create_quantizer(
            **weight_quant_params,
        )
        self.act_quantizer = create_quantizer(
            **act_quant_params,
        )

    def quant_weight(self) -> torch.Tensor:
        """Return the (fake-)quantized weight when weight quantization is on,
        otherwise the stored bfloat16 weight unchanged."""
        if self.use_weight_quant:
            return self.weight_quantizer(self.weight)
        return self.weight

    def f(
        self,
        input: torch.Tensor,
        **kwargs,
    ) -> torch.Tensor:
        """Run the convolution, optionally quantizing activations and weights.

        The trailing ``out * alpha + out.detach() * (1 - alpha)`` leaves the
        forward value mathematically unchanged but scales the gradient that
        flows back through this layer by ``alpha`` (gradient damping).

        Args:
            input: input activation tensor for ``conv2d``.
            **kwargs: accepted for interface compatibility; unused here.

        Returns:
            The convolution output (with gradient scaled by ``alpha``).
        """
        weight = self.quant_weight()
        bias = self.bias

        if self.use_act_quant:
            input = self.act_quantizer(input)

        out = torch.nn.functional.conv2d(
            input,
            weight,
            bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
        )
        alpha = 0.1
        out = out * alpha + out.detach() * (1 - alpha)

        return out