# Copyright (c) 2024 Huawei Technologies Co., Ltd.
# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import importlib.metadata
import sys
from functools import wraps
from typing import Any, Dict, List, Optional

import torch
import transformers
from packaging import version
from transformers.modeling_utils import PreTrainedModel
from transformers.pytorch_utils import Conv1D
from transformers.quantizers.quantizer_bnb_4bit import logger
from transformers.quantizers.quantizers_utils import get_module_from_name
from transformers.utils import (
    ACCELERATE_MIN_VERSION,
    is_accelerate_available,
    is_torch_npu_available,
    is_torch_xpu_available,
)
from transformers.utils.quantization_config import QuantizationMethod


def create_quantized_param_patch(
    self,
    model: "PreTrainedModel",
    param_value: "torch.Tensor",
    param_name: str,
    target_device: "torch.device",
    state_dict: Dict[str, Any],
    unexpected_keys: Optional[List[str]] = None,
):
    """NPU-aware replacement for `Bnb4BitHfQuantizer.create_quantized_param`.

    Materializes one parameter of a bnb `Linear4bit` module on `target_device`,
    combining logic from `_load_state_dict_into_meta_model` and
    `.integrations.bitsandbytes.py::set_module_quantized_tensor_to_device()`.

    Args:
        model: model being loaded; `param_name` is resolved against it.
        param_value: checkpoint tensor for this parameter (may be `None` for a
            bias that only needs to be moved off the meta device).
        param_name: fully qualified parameter name, e.g. `"h.0.mlp.fc.weight"`.
        target_device: destination device; a bare `int` is treated as an NPU
            index when NPU is available.
        state_dict: full checkpoint dict; scanned for `quant_state` components
            on the pre-quantized path.
        unexpected_keys: if given, quant-state keys consumed here are removed
            from it so they are not reported as unexpected.

    Raises:
        ValueError: if the named tensor does not exist, is not a `Params4bit`,
            needs a value to leave the meta device but got none, or (when
            pre-quantized) the serialized quant-state components are missing
            or the installed bitsandbytes cannot deserialize them.
    """
    import bitsandbytes as bnb

    module, tensor_name = get_module_from_name(model, param_name)

    if tensor_name not in module._parameters:
        raise ValueError(f"{module} does not have a parameter or a buffer named {tensor_name}.")

    old_value = getattr(module, tensor_name)

    # NPU specifics: an integer device index means "npu:<index>" here, not a CUDA ordinal.
    if isinstance(target_device, int) and is_torch_npu_available():
        target_device = f"npu:{target_device}"

    # Biases are never quantized: just move them and re-wrap as a plain Parameter.
    if tensor_name == "bias":
        if param_value is None:
            new_value = old_value.to(target_device)
        else:
            new_value = param_value.to(target_device)

        new_value = torch.nn.Parameter(new_value, requires_grad=old_value.requires_grad)
        module._parameters[tensor_name] = new_value
        return

    if not isinstance(module._parameters[tensor_name], bnb.nn.Params4bit):
        raise ValueError("this function only loads `Linear4bit components`")
    # A meta-device placeholder can only be materialized on a real device if the
    # checkpoint actually supplies a value.
    if (
        old_value.device == torch.device("meta")
        and target_device not in ["meta", torch.device("meta")]
        and param_value is None
    ):
        raise ValueError(f"{tensor_name} is on the meta device, we need a `value` to put in on {target_device}.")

    # construct `new_value` for the module._parameters[tensor_name]:
    if self.pre_quantized:
        # 4bit loading. Collecting components for restoring quantized weight
        # This can be expanded to make a universal call for any quantized weight loading

        if not self.is_serializable:
            raise ValueError(
                "Detected int4 weights but the version of bitsandbytes is not compatible with int4 serialization. "
                "Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`."
            )

        # The serialized quant state must include one of the two 4-bit formats.
        if (param_name + ".quant_state.bitsandbytes__fp4" not in state_dict) and (
            param_name + ".quant_state.bitsandbytes__nf4" not in state_dict
        ):
            raise ValueError(
                f"Supplied state dict for {param_name} does not contain `bitsandbytes__*` and possibly other `quantized_stats` components."
            )

        # Gather every quant-state tensor stored alongside this weight
        # (keys of the form "<param_name>.quant_state.*", "<param_name>.absmax", ...).
        quantized_stats = {}
        for k, v in state_dict.items():
            if param_name + "." in k:
                quantized_stats[k] = v
                if unexpected_keys is not None and k in unexpected_keys:
                    unexpected_keys.remove(k)

        # Newer bitsandbytes versions accept the owning module so the param can
        # honor the module's quant_storage dtype.
        param_kwargs = {}
        if self.is_bnb_supports_quant_storage_module:
            param_kwargs["module"] = module

        new_value = bnb.nn.Params4bit.from_prequantized(
            data=param_value,
            quantized_stats=quantized_stats,
            requires_grad=False,
            device=target_device,
            **param_kwargs,
        )
    else:
        # Fresh quantization path: quantization happens when the Params4bit is
        # moved to the target device below.
        new_value = param_value.to("cpu")

        # Support models using `Conv1D` in place of `nn.Linear` (e.g. openai-community/gpt2) by transposing the weight matrix prior to quantization.
        # Since weights are saved in the correct "orientation", we skip transposing when loading.
        if issubclass(module.source_cls, Conv1D):
            new_value = new_value.T

        # Reuse the placeholder Params4bit's settings (quant_type, blocksize, ...).
        kwargs = old_value.__dict__
        new_value = bnb.nn.Params4bit(new_value, requires_grad=False, **kwargs).to(target_device)

    module._parameters[tensor_name] = new_value


# Mirrors transformers.quantizers.quantizer_bnb_8bit.Bnb8BitHfQuantizer.update_device_map,
# extended with an Ascend NPU branch.
def update_device_map_patch(self, device_map):
    """Return `device_map` unchanged, or build a single-device default when it is None.

    Preference order for the default: CUDA, then NPU, then XPU, then CPU.
    """
    if device_map is not None:
        return device_map
    if torch.cuda.is_available():
        return {"": torch.cuda.current_device()}
    if is_torch_npu_available():
        return {"": f"npu:{torch.npu.current_device()}"}
    if is_torch_xpu_available():
        return {"": f"xpu:{torch.xpu.current_device()}"}
    return {"": "cpu"}


def post_init_patch(self):
    r"""
    Safety checker that arguments are correct - also replaces some NoneType arguments with their default values.

    Validates the types of the BitsAndBytesConfig fields (in declaration order)
    and, when 4-bit loading is requested, that the installed
    `bitsandbytes-npu-beta` is at least 0.39.0.

    Raises:
        TypeError: when a field has the wrong type.
        ValueError: when 4-bit loading is requested with an incompatible bitsandbytes.
    """
    # (attribute, expected type, None allowed, error message) — order matters:
    # checks fire in the same sequence as the original field-by-field version.
    type_checks = (
        ("load_in_4bit", bool, False, "load_in_4bit must be a boolean"),
        ("load_in_8bit", bool, False, "load_in_8bit must be a boolean"),
        ("llm_int8_threshold", float, False, "llm_int8_threshold must be a float"),
        ("llm_int8_skip_modules", list, True, "llm_int8_skip_modules must be a list of strings"),
        ("llm_int8_enable_fp32_cpu_offload", bool, False, "llm_int8_enable_fp32_cpu_offload must be a boolean"),
        ("llm_int8_has_fp16_weight", bool, False, "llm_int8_has_fp16_weight must be a boolean"),
        ("bnb_4bit_compute_dtype", torch.dtype, True, "bnb_4bit_compute_dtype must be torch.dtype"),
        ("bnb_4bit_quant_type", str, False, "bnb_4bit_quant_type must be a string"),
        ("bnb_4bit_use_double_quant", bool, False, "bnb_4bit_use_double_quant must be a boolean"),
    )
    for attr, expected, none_ok, message in type_checks:
        value = getattr(self, attr)
        if none_ok and value is None:
            continue
        if not isinstance(value, expected):
            raise TypeError(message)

    # 4-bit support landed in bitsandbytes 0.39.0; refuse older installs.
    if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes-npu-beta")) >= version.parse(
        "0.39.0"
    ):
        raise ValueError("4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version")


def is_bitsandbytes_available_patch():
    # `bitsandbytes` versions older than 0.43.1 eagerly require CUDA at import time,
    # so those versions of the library are practically only available when CUDA is too.
    if version.parse(importlib.metadata.version("bitsandbytes-npu-beta")) < version.parse("0.43.1"):
        return torch.cuda.is_available()

    # Newer versions of `bitsandbytes` can be imported on systems without CUDA.
    return True


def validate_environment_patch(self, *args, **kwargs):
    """Pre-flight checks for loading a bnb 4-bit model (NPU-aware).

    Verifies accelerate/bitsandbytes availability, rejects tf/flax weights,
    rejects device maps that offload quantized modules to CPU/disk without
    `llm_int8_enable_fp32_cpu_offload`, and enforces bitsandbytes >= 0.39.0.

    Raises:
        ImportError: accelerate or bitsandbytes is missing.
        ValueError: unsupported weight format, bad device map, or old bitsandbytes.
    """
    if not is_accelerate_available():
        raise ImportError(
            f"Using `bitsandbytes` 4-bit quantization requires Accelerate: `pip install 'accelerate>={ACCELERATE_MIN_VERSION}'`"
        )
    if not is_bitsandbytes_available_patch():
        raise ImportError(
            "Using `bitsandbytes` 4-bit quantization requires the latest version of bitsandbytes: `pip install -U bitsandbytes`"
        )

    # Imported lazily: these helpers only exist in recent transformers versions.
    from transformers.integrations import validate_bnb_backend_availability
    from transformers.utils import is_bitsandbytes_multi_backend_available

    multibackend_enabled = is_bitsandbytes_multi_backend_available()
    validate_bnb_backend_availability(raise_exception=True)

    if kwargs.get("from_tf", False) or kwargs.get("from_flax", False):
        raise ValueError(
            "Converting into 4-bit or 8-bit weights from tf/flax weights is currently not supported, please make"
            " sure the weights are in PyTorch format."
        )

    device_map = kwargs.get("device_map", None)

    # `isinstance(None, dict)` is False, so the explicit None test is implied.
    if isinstance(device_map, dict) and not self.quantization_config.llm_int8_enable_fp32_cpu_offload:
        # Ignore modules that are kept unquantized (e.g. the lm_head): they may
        # legitimately live on CPU/disk.
        quantized_map = {k: v for k, v in device_map.items() if k not in self.modules_to_not_convert}
        cpu_only = set(device_map.values()) == {"cpu"}
        if cpu_only and multibackend_enabled:
            # Full-CPU execution is fine when the multi-backend build supports it.
            pass
        elif "cpu" in quantized_map.values() or "disk" in quantized_map.values():
            raise ValueError(
                "Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the "
                "quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules "
                "in 32-bit, you need to set `llm_int8_enable_fp32_cpu_offload=True` and pass a custom `device_map` to "
                "`from_pretrained`. Check the official documentation for more details: "
                "https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu"
            )

    if version.parse(importlib.metadata.version("bitsandbytes-npu-beta")) < version.parse("0.39.0"):
        raise ValueError(
            "You have a version of `bitsandbytes` that is not compatible with 4bit inference and training"
            " make sure you have the latest version of `bitsandbytes` installed"
        )


@wraps(torch.nn.Module.to)
def to(self, *args, **kwargs):
    """Replacement for `PreTrainedModel.to` that blocks unsupported casts/moves
    on quantized models (HQQ, Quark, bitsandbytes 4/8-bit, GPTQ), with an
    NPU-aware bitsandbytes version gate for 4-bit models.
    """
    # For BNB/GPTQ models, we prevent users from casting the model to another dtype to restrict unwanted behaviours.
    # the correct API should be to load the model with the desired dtype directly through `from_pretrained`.
    dtype_present_in_args = "dtype" in kwargs

    if not dtype_present_in_args:
        # `.to` also accepts a positional dtype, e.g. `model.to(torch.float16)`.
        for arg in args:
            if isinstance(arg, torch.dtype):
                dtype_present_in_args = True
                break

    # HQQ models support neither moving nor casting through `.to`.
    if getattr(self, "quantization_method", None) == QuantizationMethod.HQQ:
        raise ValueError("`.to` is not supported for HQQ-quantized models.")

    # Quark models may be moved, but not cast to a new dtype.
    if dtype_present_in_args and getattr(self, "quantization_method", None) == QuantizationMethod.QUARK:
        raise ValueError("Casting a Quark quantized model to a new `dtype` is not supported.")

    # Checks if the model has been loaded in 4-bit or 8-bit with BNB
    if getattr(self, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES:
        if dtype_present_in_args:
            raise ValueError(
                "You cannot cast a bitsandbytes model in a new `dtype`. Make sure to load the model using `from_pretrained` using the"
                " desired `dtype` by passing the correct `torch_dtype` argument."
            )

        if getattr(self, "is_loaded_in_8bit", False):
            raise ValueError(
                "`.to` is not supported for `8-bit` bitsandbytes models. Please use the model as it is, since the"
                " model has already been set to the correct devices and casted to the correct `dtype`."
            )
        elif is_torch_npu_available():
            # 4-bit path on NPU: gate on the `bitsandbytes-npu-beta` distribution.
            # NOTE(review): the gate allows 0.43.1 while the message asks for
            # >= 0.43.2 (matching the non-NPU branch) — confirm the intended minimum.
            if version.parse(importlib.metadata.version("bitsandbytes-npu-beta")) < version.parse("0.43.1"):
                raise ValueError(
                    "Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. "
                    f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2."
                )
        elif version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.43.2"):
            # 4-bit path off NPU: gate on the stock `bitsandbytes` distribution.
            raise ValueError(
                "Calling `to()` is not supported for `4-bit` quantized models with the installed version of bitsandbytes. "
                f"The current device is `{self.device}`. If you intended to move the model, please install bitsandbytes >= 0.43.2."
            )
    elif getattr(self, "quantization_method", None) == QuantizationMethod.GPTQ:
        if dtype_present_in_args:
            raise ValueError(
                "You cannot cast a GPTQ model in a new `dtype`. Make sure to load the model using `from_pretrained` using the desired"
                " `dtype` by passing the correct `torch_dtype` argument."
            )
    # Since this function is installed as `PreTrainedModel.to` (see patch_bnb),
    # `super(PreTrainedModel, self)` dispatches to the next class in the MRO
    # (torch.nn.Module), which performs the actual move/cast.
    return super(PreTrainedModel, self).to(*args, **kwargs)


def is_serializable_patch(self, safe_serialization=None):
    """Report whether 4-bit weights can be serialized with the installed bitsandbytes.

    4-bit serialization requires `bitsandbytes-npu-beta` >= 0.41.3; older
    versions get a warning and False. `safe_serialization` is accepted for
    signature compatibility but not consulted.
    """
    installed = version.parse(importlib.metadata.version("bitsandbytes-npu-beta"))
    if installed >= version.parse("0.41.3"):
        return True
    logger.warning(
        "You are calling `save_pretrained` to a 4-bit converted model, but your `bitsandbytes` version doesn't support it. "
        "If you want to save 4-bit models, make sure to have `bitsandbytes>=0.41.3` installed."
    )
    return False


def patch_bnb():
    """Monkey-patch transformers so bitsandbytes 4-bit quantization works on NPU.

    Rebinds `is_bitsandbytes_available` in every already-imported module, then
    installs the NPU-aware implementations on the 4-bit quantizer class, the
    quantization config, and `PreTrainedModel.to`.
    """
    # Snapshot the module table before iterating: `hasattr` can trigger lazy
    # module `__getattr__` imports (and other threads can import concurrently),
    # which mutates `sys.modules` and would raise
    # "RuntimeError: dictionary changed size during iteration" on a live view.
    for module in list(sys.modules.values()):
        if hasattr(module, "is_bitsandbytes_available"):
            module.is_bitsandbytes_available = is_bitsandbytes_available_patch

    quantizer_cls = transformers.quantizers.quantizer_bnb_4bit.Bnb4BitHfQuantizer
    quantizer_cls.create_quantized_param = create_quantized_param_patch
    quantizer_cls.update_device_map = update_device_map_patch
    quantizer_cls.validate_environment = validate_environment_patch
    quantizer_cls.is_serializable = is_serializable_patch
    transformers.BitsAndBytesConfig.post_init = post_init_patch
    transformers.modeling_utils.PreTrainedModel.to = to
