from typing import Callable
import torch
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")

# Substrings matched against qualified parameter names by disable_params():
# any parameter whose name contains one of these is detached from autograd
# and re-registered as a non-persistent bfloat16 buffer (token embeddings,
# LM output head, ViT class/register tokens, positional embeddings).
default_disabled_params = [
    # ".ls",
    "embed_tokens",
    "lm_head",
    "cls_token",
    "reg_token",
    "pos_embed",
]


@torch.no_grad
def disable_params(model: torch.nn.Module, all_disable=False):
    """Convert selected parameters of *model* into frozen bfloat16 buffers.

    Every parameter whose qualified name contains a substring from
    ``default_disabled_params`` (or every parameter when ``all_disable`` is
    True) is removed from its owning module and re-registered as a
    non-persistent bfloat16 buffer, excluding it from the optimizer and
    from the state dict.

    Args:
        model: Model whose parameters should be (partially) frozen.
        all_disable: If True, freeze every parameter regardless of name.
    """
    # Materialize the parameter list up front: the loop mutates the module
    # tree (delattr/register_buffer), which would break a live iterator.
    for name, param in tuple(model.named_parameters()):
        if not (all_disable or any(key in name for key in default_disabled_params)):
            continue

        # Walk the dotted path down to the module that owns the parameter.
        # NOTE(review): the hasattr guard silently skips unresolvable path
        # components; paths from named_parameters() should always resolve,
        # so this is defensive only — confirm no aliasing is intended.
        parts = name.split(".")
        owner = model
        for part in parts[:-1]:
            if hasattr(owner, part):
                owner = getattr(owner, part)
        attr = parts[-1] if len(parts) > 1 else name

        logger.info(f"deleting param {name} for {type(owner)}")
        delattr(owner, attr)
        # Freeze the original tensor explicitly (the old code set
        # requires_grad on `.data`, which is always a no-op).
        param.requires_grad_(False)
        # Non-persistent: the frozen copy must not appear in the state dict.
        owner.register_buffer(attr, param.data.bfloat16(), persistent=False)
        assert getattr(owner, attr, None) is not None
        logger.info(f"{type(owner)} has {attr} {getattr(owner, attr).shape}")


@torch.no_grad
def check_trainable_parameters(model: torch.nn.Module, print_name: bool | str = False):
    """Log the names of trainable parameters in *model*.

    Args:
        model: Model to inspect.
        print_name: Falsy to log nothing; True to log every trainable
            parameter name; a non-empty string to log only names that
            contain it as a substring.
    """
    if not print_name:
        return
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        if isinstance(print_name, bool):
            logger.info(name)
        elif isinstance(print_name, str) and print_name in name:
            logger.info(name)

            # assert "scale" in name or "zero_point" in name or "weight" in name or "bias" in name, f"{name} {p.shape}"


def print_model_param_size(model: torch.nn.Module):
    """Log and return the total size of the model's trainable parameters in MB.

    Only parameters with ``requires_grad=True`` are counted; buffers are
    deliberately excluded from the total.

    Args:
        model (torch.nn.Module): Model to measure.

    Returns:
        float: Trainable-parameter size in megabytes.
    """
    param_size = sum(
        p.nelement() * p.element_size() for p in model.parameters() if p.requires_grad
    )
    total_size = param_size / (1024**2)  # bytes -> MB

    logger.info(f"模型可学习参数总大小: {total_size:.3f} MB")
    return total_size


def disable_module(module: torch.nn.Module, use_inference_mode=True,clean_mem=False) -> Callable | None:
    module.eval()
    for p in module.parameters():
        p.requires_grad = False
    if use_inference_mode:

        def f(self,*args, **kwargs):
            with torch.inference_mode():
                res = self.org_forward(*args, **kwargs)
                # if clean_mem:
                #     torch.cuda.empty_cache()
                return res

        org_forward = module.forward
        module.org_forward = module.forward

        module.forward = f.__get__(module)
        return org_forward
