import torch
import torch.nn as nn
import torch.nn.functional as F
from quantize.quantizer import create_quantizer
from quantize.base import QuantModuleNoLinear


class MatMul(nn.Module):
    """Thin ``nn.Module`` wrapper around matrix multiplication.

    Wrapping ``torch.matmul`` in a module lets the multiply appear as a
    named submodule (so hooks / quantization wrappers can target it).
    """

    def __init__(self):
        super().__init__()

    def forward(self, x1, x2):
        """Return the matrix product of *x1* and *x2*."""
        return x1 @ x2


class QuantMatMul(QuantModuleNoLinear):
    """Matrix multiplication with optional fake-quantization of both operands.

    Each operand gets its own quantizer built from ``create_quantizer``.
    Quantization is opt-in: ``use_act_quant`` starts ``False``, in which case
    ``f`` is a plain ``torch.matmul``.
    """

    def __init__(
        self,
        x1_quant_params: dict = None,
        x2_quant_params: dict = None,
        d_model=None,
    ):
        """
        Args:
            x1_quant_params: kwargs forwarded to ``create_quantizer`` for the
                first operand. ``None`` (the default) means no extra kwargs.
                (Previously a mutable ``{}`` default — replaced with a ``None``
                sentinel to avoid shared mutable-default pitfalls.)
            x2_quant_params: same, for the second operand.
            d_model: forwarded to both quantizers as ``channel_dim`` —
                presumably the per-channel axis size; confirm against
                ``create_quantizer``.
        """
        super().__init__(MatMul())
        # De-activate the quantized forward by default; callers flip this on.
        self.use_act_quant = False
        # One independent quantizer per operand.
        self.x1_quantizer = create_quantizer(
            **(x1_quant_params or {}), channel_dim=d_model
        )
        self.x2_quantizer = create_quantizer(
            **(x2_quant_params or {}), channel_dim=d_model
        )

    def quant_x1(self, x1):
        """Return *x1* fake-quantized if ``use_act_quant`` is set, else unchanged."""
        if self.use_act_quant:
            x1 = self.x1_quantizer(x1)
        return x1

    def quant_x2(self, x2):
        """Return *x2* fake-quantized if ``use_act_quant`` is set, else unchanged."""
        if self.use_act_quant:
            x2 = self.x2_quantizer(x2)
        return x2

    def f(
        self,
        x1,
        x2,
        **kwargs,
    ):
        """Compute ``matmul(Q(x1), Q(x2))`` where Q is the optional quantizer.

        ``**kwargs`` is accepted for call-site compatibility and ignored.
        """
        x1_dequant = self.quant_x1(x1)
        x2_dequant = self.quant_x2(x2)
        return torch.matmul(x1_dequant, x2_dequant)
