# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/hiyouga/LLaMA-Factory/blob/7965e9840c18c71028c1a3a04c404e9fae196c0d/src/llamafactory/model/loader.py#L53

import os
from types import MethodType
from typing import Union

import torch

import transformers
from accelerate import PartialState
from transformers import (
    AutoConfig,
    AutoTokenizer,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    PreTrainedTokenizerBase,
)
from transformers.dynamic_module_utils import get_relative_imports
from transformers.integrations import is_deepspeed_zero3_enabled
from openmind.utils import logging
from openmind.integrations.transformers.npu_fused_ops.sdk import SUPPORTED_FUSED_MODELS, map_fused_kernel_to_model
from openmind.flow.arguments import get_args
from openmind.flow.model.model_registry import SUPPORTED_MODELS
from openmind.flow.model.adapter import apply_adapter
from openmind.integrations.transformers.bitsandbytes import patch_bnb

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_download_from_hub() -> str:
    """
    Resolve ``args.model_name_or_path`` to a usable local directory.

    Returns the path unchanged when it already exists on disk; otherwise
    downloads the model snapshot from the modelers hub and returns the
    download location.
    """
    args = get_args()
    name_or_path = args.model_name_or_path

    if os.path.exists(name_or_path):
        return name_or_path

    # Not a local path: fetch the snapshot from the modelers hub.
    from openmind.utils.hub import snapshot_download

    return snapshot_download(
        name_or_path,
        revision=args.model_revision,
        cache_dir=args.cache_dir,
        token=args.token,
    )


def get_current_device(device_index: Union[str, int]):
    r"""
    Return the current device string (e.g. ``"npu:0"``) for ``device_index``.

    The device type is taken from the accelerate ``PartialState``.
    """
    device_type = PartialState().device.type.lower()
    return f"{device_type}:{device_index}"


def skip_check_imports() -> None:
    r"""
    Avoids flash attention import error in custom model files.
    """
    force_check = os.environ.get("FORCE_CHECK_IMPORTS", "0").lower()
    if force_check not in ("true", "1"):
        # Relax transformers' strict import validation so remote-code model
        # files referencing optional packages do not abort loading.
        transformers.dynamic_module_utils.check_imports = get_relative_imports


def get_init_kwargs():
    """
    Build the keyword arguments shared by every ``from_pretrained`` call.

    NB: resolves ``args.model_name_or_path`` (mapping ``model_id`` through the
    model registry if needed) and downloads the weights when they are not
    available locally, mutating ``args.model_name_or_path`` in place.
    """
    args = get_args()
    skip_check_imports()

    if not args.model_id and not args.model_name_or_path:
        raise ValueError("Please set 'model_id' or 'model_name_or_path' to load model.")

    if args.model_id is not None and args.model_name_or_path is None:
        # Translate the registry id into its modelers hub path.
        args.model_name_or_path = SUPPORTED_MODELS[args.model_id].path["modelers"]

    # After this call, model_name_or_path always points at a local directory.
    args.model_name_or_path = try_download_from_hub()

    init_kwargs = {
        "trust_remote_code": args.trust_remote_code,
        "cache_dir": args.cache_dir,
        "revision": args.model_revision,
    }
    return init_kwargs


def get_config():
    r"""
    Load model config.
    NB: May change attributes in model_args
    """
    args = get_args()
    # get_init_kwargs() must run first: it resolves args.model_name_or_path
    # to a local directory as a side effect.
    init_kwargs = get_init_kwargs()
    return AutoConfig.from_pretrained(args.model_name_or_path, **init_kwargs)


def get_tokenizer():
    r"""
    Load pretrained tokenizer.

    Falls back to the fast tokenizer if loading with the configured options
    raises ``ValueError``. Optionally registers new special tokens and flags
    the vocabulary for resizing, and guarantees a pad token is defined.
    """
    args = get_args()
    init_kwargs = get_init_kwargs()
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            use_fast=args.use_fast_tokenizer,
            split_special_tokens=args.split_special_tokens,
            padding_side="right",
            **init_kwargs,
        )
    except ValueError:
        # try the fast version: some tokenizers only ship a fast implementation
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            use_fast=True,
            padding_side="right",
            **init_kwargs,
        )
    except Exception as e:
        raise RuntimeError("Failed to load tokenizer.") from e

    if "PreTrainedTokenizerBase" not in str(tokenizer._pad.__func__):
        # Remote-code tokenizers sometimes override `_pad` incompatibly;
        # restore the base implementation so padding behaves consistently.
        tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)

    if args.new_special_tokens is not None:
        num_added_tokens = tokenizer.add_special_tokens(
            dict(additional_special_tokens=args.new_special_tokens),
            replace_additional_special_tokens=False,
        )
        # Bug fix: log the tokens that were actually added. The previous code
        # referenced the nonexistent `args.special_tokens_dict`, raising
        # AttributeError whenever new special tokens were supplied.
        logger.info_rank0(
            "Add special tokens: {}, num_added_tokens: {}".format(
                ",".join(args.new_special_tokens), num_added_tokens
            )
        )
        if num_added_tokens > 0 and not args.resize_vocab:
            args.resize_vocab = True
            logger.warning_rank0("New tokens have been added, changed `resize_vocab` to True.")

    if tokenizer.pad_token is None:
        # Fall back to EOS so batched padding works for models without a pad token.
        tokenizer.pad_token = tokenizer.eos_token

    return tokenizer


def patch_config(config):
    """
    Strip deprecated NPU fused-operator settings from the model config.

    Configuring fused operators via the config is deprecated and will be
    removed in future versions; use ``openmind.apply_fused_kernel`` instead.
    """
    if getattr(config, "_attn_implementation", None) == "npu_fusion_attention":
        logger.warning_rank0(
            "Currently set `_attn_implementation='npu_fusion_attention'` is not supported, and it will be removed "
            "in next versions. Use the `openmind.apply_fused_kernel` instead."
        )
        config._attn_implementation = "eager"

    if hasattr(config, "use_npu_rms_norm"):
        logger.warning_rank0(
            "Currently set `use_npu_rms_norm=True` is not supported, and it will be removed "
            "in next versions. Use the `openmind.apply_fused_kernel` instead."
        )
        delattr(config, "use_npu_rms_norm")


def get_model():
    r"""
    Loads pretrained model.

    Resolves dtype, device placement, and optional 4-bit quantization from
    the global arguments, maps NPU fused kernels onto supported architectures
    when training, loads the weights, applies adapters, and puts the model in
    train or eval mode.
    """
    args = get_args()
    init_kwargs = get_init_kwargs()
    config = get_config()

    # Set default torch_dtype using value from config.json (if available)
    if args.bf16:
        init_kwargs["torch_dtype"] = torch.bfloat16
    elif args.fp16:
        init_kwargs["torch_dtype"] = torch.float16
    else:
        init_kwargs["torch_dtype"] = getattr(config, "torch_dtype", torch.float32)

    # DeepSpeed ZeRO-3 manages weight sharding itself and conflicts with
    # low_cpu_mem_usage, so disable the latter under ZeRO-3.
    init_kwargs["low_cpu_mem_usage"] = args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())

    # For chat/deploy/export, set cards by following three ways
    if not args.do_train:
        if os.getenv("ASCEND_RT_VISIBLE_DEVICES", None) is not None:
            init_kwargs["device_map"] = "auto"
        elif args.device == "cpu":
            init_kwargs["device_map"] = "cpu"
        else:
            if not isinstance(args.device, int):
                # Bug fix: corrected "mutiple" -> "multiple" in the error message.
                raise ValueError(
                    "param device only support single card. If you want to use multiple cards to split model weight, please set environment `ASCEND_RT_VISIBLE_DEVICES`."
                )
            else:
                init_kwargs["device_map"] = {"": get_current_device(args.device)}

    if args.load_in_4bit:
        patch_bnb()
        # Only bf16 compute is supported for bnb 4-bit quantization here.
        if init_kwargs["torch_dtype"] != torch.bfloat16:
            raise ValueError("we only support bnb_4bit_compute_dtype=bf16. Please set parameter `bf16` to True.")
        logger.info_rank0(
            "The torch_dtype is set to torch.bfloat16. Currently support bnb_4bit_compute_dtype is only bf16."
        )

        nf4_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
    else:
        nf4_config = None

    patch_config(config)
    # Map NPU fused kernels onto the architecture when supported (training only).
    if config.architectures and config.architectures[0] in SUPPORTED_FUSED_MODELS and args.do_train:
        map_fused_kernel_to_model(
            config.architectures[0],
            use_npu_fusion_attention=args.use_npu_fusion_attention,
            use_fused_rms_norm=args.use_fused_rms_norm,
            use_fused_rope=args.use_fused_rope,
            use_fused_swiglu=args.use_fused_swiglu,
        )

    model = AutoModelForCausalLM.from_pretrained(
        pretrained_model_name_or_path=args.model_name_or_path,
        config=config,
        quantization_config=nf4_config,
        **init_kwargs,
    )

    # Wrap with (e.g. LoRA) adapters; behavior depends on train vs. inference.
    model = apply_adapter(model, args.do_train)

    if args.do_train:
        model.train()

        if args.use_gradient_checkpointing and model.supports_gradient_checkpointing:
            model.gradient_checkpointing_enable()
            logger.info_rank0("Gradient checkpointing has been enabled.")
    else:
        model.eval()

    # Debug aid: dump every parameter's dtype/device/trainability on rank 0.
    if args.print_param_status and int(os.getenv("LOCAL_RANK", "0")) == 0:
        for name, param in model.named_parameters():
            print(f"name: {name}, dtype: {param.dtype}, device: {param.device}, trainable: {param.requires_grad}")

    return model
