from pathlib import Path
import torch
import torch.nn as nn
import torch.nn.functional as F
import types
from quantize.quantizer import Quantizer
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")


class QuantModule(nn.Module):
    """
    Base class for qunat modules
    """

    def __init__(self, org_module, **kwargs):
        nn.Module.__init__(self, **kwargs)
        assert not isinstance(org_module, QuantModule)
        self.org_module: list[torch.nn.Module] = [org_module]
        self.linear_only: bool = False
        self.name: str = self.__class__.__name__
        self.save_tensors = False
        self.save_path: Path = None
        self.online_quant: bool = False

    @property
    def quantizers(self):
        return [m for m in self.modules if isinstance(m, Quantizer)]

    def forward(self, *args, **kwargs):
        return self.f(*args, **kwargs)

    def f(self, *args, **kwargs):
        raise NotImplementedError

    @torch.compiler.disable
    def save(self, tensor: torch.Tensor, name: str, dtype=torch.float16):
        if self.save_tensors:
            dir = self.save_path
            dir.mkdir(exist_ok=True, parents=True)
            torch.save(
                (tensor.to(dtype) if torch.is_tensor(tensor) else tensor), dir / name
            )

    def set_names(self):
        for name, module in self.named_modules():
            if isinstance(module, (QuantModule, Quantizer)):
                module.name = name

    def set_quant_state(
        self,
        weight_quant: bool = False,
        act_quant: bool = False,
        integer_only: bool = False,
        use_old: bool = False,
        linear_only: bool = False,
        embed_quant: bool = False,  # unuse
    ):
        if hasattr(self, "use_weight_quant"):
            self.use_weight_quant = weight_quant
        if hasattr(self, "use_act_quant"):
            self.use_act_quant = act_quant
        # if hasattr(self, "integer_only"):
        self.integer_only = integer_only
        self.use_old = use_old
        self.linear_only = linear_only
        # names = []
        # for name, m in self.named_modules():
        #     if isinstance(m, (QuantModule)) and m is not self:
        #         names.append(name)
        #         m.set_quant_state(
        #             weight_quant=weight_quant,
        #             act_quant=act_quant,
        #             integer_only=integer_only,
        #             linear_only=linear_only,
        #             use_old=use_old,
        #             embed_quant=embed_quant,
        #         )


QUANT_LOSS : nn.Module | None = None


class QuantModuleLinear(QuantModule):
    """Quant wrapper for linear modules.

    Dispatches to ``online_f`` when online quantization is enabled; otherwise
    runs the quantized implementation ``f``. When ``QUANT_LOSS`` tracking is
    active, records the MSE between the quantized output and the original
    module's output for modules whose name contains "language".
    """

    def forward(self, *args, **kwargs):
        if self.online_quant:
            return self.online_f(*args, **kwargs)
        res = self.f(*args, **kwargs)
        if QUANT_LOSS is not None and "language" in self.name:
            # The original (unquantized) module does not accept these kwargs.
            kwargs.pop("output_residual", None)
            kwargs.pop("attn_mask", None)
            with torch.inference_mode():
                QUANT_LOSS.diff_dict[self.name] = (
                    torch.nn.functional.mse_loss(res, self.org_module[0](*args, **kwargs))
                    .detach()
                    .cpu()
                    .item()
                )
        return res

    def online_f(self, x, *args, **kwargs):
        """Online-quantization forward; must be provided by subclasses."""
        raise NotImplementedError


class QuantModuleNoLinear(QuantModule):
    """Quant wrapper for non-linear modules.

    In linear-only mode the wrapper is bypassed entirely and the original
    module runs. Otherwise the quantized implementation ``f`` runs and, when
    ``QUANT_LOSS`` tracking is active, the MSE against the original module's
    output is recorded for modules whose name contains "language".
    """

    def forward(self, *args, **kwargs):
        # Bypass quantization when only linear layers are being quantized.
        if self.linear_only:
            return self.org_module[0](*args, **kwargs)

        res = self.f(*args, **kwargs)
        if QUANT_LOSS is not None and "language" in self.name:
            # The original (unquantized) module does not accept these kwargs.
            kwargs.pop("output_residual", None)
            kwargs.pop("attn_mask", None)
            # Rebuild the reference module's positional inputs: tensors only,
            # except softmax modules which take just the first argument.
            if len(args) >= 1:
                x = (args[0],) if "softmax" in self.name else tuple(
                    a for a in args if torch.is_tensor(a)
                )
            else:
                x = tuple(v for v in kwargs.values() if torch.is_tensor(v))
            with torch.inference_mode():
                true_res = self.org_module[0](*x)
                if isinstance(true_res, tuple):
                    true_res = true_res[0]
                res0 = res[0] if isinstance(res, tuple) else res
                # BUGFIX: compare against the computed reference ``true_res``.
                # The previous code discarded it and re-ran the original module
                # with the full args/kwargs, doubling the reference forward and
                # crashing in mse_loss when the module returns a tuple.
                QUANT_LOSS.diff_dict[self.name] = (
                    torch.nn.functional.mse_loss(res0, true_res)
                    .detach()
                    .cpu()
                    .item()
                )
        return res