# Copyright 2024 the LlamaFactory team.
# Copyright (c) 2024 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Adapted from https://github.com/hiyouga/LLaMA-Factory/blob/7965e9840c18c71028c1a3a04c404e9fae196c0d/src/llamafactory/model/loader.py#L53

import os
from types import MethodType
from typing import Union

import torch

import transformers
from accelerate import PartialState
from transformers import (
    AutoConfig,
    AutoTokenizer,
    AutoProcessor,
    AutoModelForCausalLM,
    BitsAndBytesConfig,
    PreTrainedTokenizerBase,
    ProcessorMixin,
    PretrainedConfig,
    PreTrainedTokenizer,
    AutoModelForVision2Seq,
)
from transformers.dynamic_module_utils import get_relative_imports
from transformers.integrations import is_deepspeed_zero3_enabled
from openmind.utils import logging, is_torch_npu_available
from openmind.integrations.transformers.npu_fused_ops.sdk import map_fused_kernel_to_model
from openmind.flow.arguments import get_args
from openmind.flow.model.model_registry import SUPPORTED_MODELS
from openmind.flow.model.adapter import apply_adapter
from openmind.flow.model.sequence_parallel.seq_utils import apply_sequence_parallel
from openmind.integrations.transformers.bitsandbytes import patch_bnb
from openmind.utils.loader_utils import get_platform_loader
from openmind.utils.arguments_utils import print_formatted_table, validate_directory

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def print_model_info():
    """Print a formatted table listing every supported model_id and its per-platform download path."""
    rows = [{"model_id": model_id, **metadata.path} for model_id, metadata in SUPPORTED_MODELS.items()]
    print_formatted_table(rows, "keys")


def try_download_from_hub() -> str:
    """
    Resolve the model to a usable path, downloading from the hub when needed.

    Returns:
        A path (local directory or snapshot cache) holding the model files.

    Raises:
        ValueError: if neither `model_id` nor `model_name_or_path` is set, or the
            `model_id` / platform combination is not supported.
    """
    args = get_args()

    # download from modelers or huggingface
    snapshot_download_func, openmind_platform = get_platform_loader("model")

    if not args.model_id and not args.model_name_or_path:
        raise ValueError("Please set 'model_id' or 'model_name_or_path' to load model.")

    if args.model_id is not None and args.model_name_or_path is None:
        try:
            args.model_name_or_path = SUPPORTED_MODELS[args.model_id].path[openmind_platform]
        except KeyError as e:
            print_model_info()
            # Both lookups raise KeyError; the failing key tells us which one it was.
            if e.args[0] == args.model_id:
                raise ValueError(
                    "The model_id is not in supported models. Please refer to the table above to provide the correct model_id."
                ) from e
            raise ValueError(
                "The model is not supported for download on the current platform. Please refer to the table above to provide the correct environment variable for `OPENMIND_PLATFORM`."
            ) from e

    resolved = args.model_name_or_path
    if os.path.exists(resolved):
        validate_directory(resolved)
        return resolved

    return snapshot_download_func(
        resolved,
        revision=args.model_revision,
        cache_dir=args.cache_dir,
        token=args.token,
    )


def get_current_device(device_index: Union[str, int]):
    r"""
    Build a device string (e.g. "npu:0") combining the accelerator type
    reported by accelerate with the given device index.
    """
    accelerator_type = PartialState().device.type.lower()
    return f"{accelerator_type}:{device_index}"


def skip_check_imports() -> None:
    r"""
    Avoid flash attention import error in custom model files by relaxing the
    transformers import check, unless FORCE_CHECK_IMPORTS is truthy.
    """
    force_check = os.environ.get("FORCE_CHECK_IMPORTS", "0").lower()
    if force_check in ("true", "1"):
        return
    transformers.dynamic_module_utils.check_imports = get_relative_imports


def get_init_kwargs():
    """
    Build the common kwargs shared by every `from_pretrained` call,
    resolving `args.model_name_or_path` to an existing path first.
    """
    args = get_args()
    skip_check_imports()

    path = args.model_name_or_path
    if not path or not os.path.exists(path):
        args.model_name_or_path = try_download_from_hub()

    init_kwargs = {
        "trust_remote_code": args.trust_remote_code,
        "cache_dir": args.cache_dir,
        "revision": args.model_revision,
    }
    return init_kwargs


def get_config():
    r"""
    Load the model config via AutoConfig.
    NB: may change attributes in the global args (path resolution in get_init_kwargs).
    """
    args = get_args()
    kwargs = get_init_kwargs()
    return AutoConfig.from_pretrained(args.model_name_or_path, **kwargs)


def get_tokenizer():
    r"""
    Load the pretrained tokenizer for `args.model_name_or_path`.

    Falls back to the fast tokenizer when the configured one raises ValueError,
    restores the base `_pad` implementation if a custom tokenizer overrode it,
    adds any user-requested special tokens, and guarantees a pad token.

    Returns:
        The prepared tokenizer (padding side fixed to "right").

    Raises:
        RuntimeError: if the tokenizer cannot be loaded at all.
    """
    args = get_args()
    init_kwargs = get_init_kwargs()
    try:
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            use_fast=args.use_fast_tokenizer,
            split_special_tokens=args.split_special_tokens,
            padding_side="right",
            **init_kwargs,
        )
    except ValueError:
        # try the fast version
        tokenizer = AutoTokenizer.from_pretrained(
            args.model_name_or_path,
            use_fast=True,
            padding_side="right",
            **init_kwargs,
        )
    except Exception as e:
        raise RuntimeError("Failed to load tokenizer.") from e

    # Some custom tokenizers ship a broken `_pad`; restore the base-class implementation.
    if "PreTrainedTokenizerBase" not in str(tokenizer._pad.__func__):
        tokenizer._pad = MethodType(PreTrainedTokenizerBase._pad, tokenizer)

    if args.new_special_tokens is not None:
        num_added_tokens = tokenizer.add_special_tokens(
            dict(additional_special_tokens=args.new_special_tokens),
            replace_additional_special_tokens=False,
        )
        # BUGFIX: previously logged `args.special_tokens_dict.keys()`, an attribute that
        # does not exist on args (would raise AttributeError); log the tokens actually added.
        logger.info_rank0(
            "Add special tokens: {}, num_added_tokens: {}".format(
                ",".join(args.new_special_tokens), num_added_tokens
            )
        )
        if num_added_tokens > 0 and not args.resize_vocab:
            # Embedding matrix must grow to cover the new token ids.
            args.resize_vocab = True
            logger.warning_rank0("New tokens have been added, changed `resize_vocab` to True.")

    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    return tokenizer


def get_tokenizer_and_processor():
    """
    Load the tokenizer and, when the model ships one, its multimodal processor.

    Returns:
        Tuple of (tokenizer, processor); processor is None for text-only models
        or when loading/patching the processor fails (best-effort).
    """
    args = get_args()
    config = get_config()
    init_kwargs = get_init_kwargs()
    tokenizer = get_tokenizer()

    processor = None
    try:
        processor = AutoProcessor.from_pretrained(args.model_name_or_path, **init_kwargs)
        patch_processor(processor, config, tokenizer, args)
    except Exception as e:
        logger.debug(f"Processor was not found: {e}.")
        processor = None

    # AutoProcessor may hand back a tokenizer instead of a real processor; discard it. See:
    # https://github.com/huggingface/transformers/blob/v4.40.0/src/transformers/models/auto/processing_auto.py#L324
    if processor is not None and "Processor" not in type(processor).__name__:
        processor = None

    return tokenizer, processor


def get_image_seqlen(config: PretrainedConfig) -> int:
    r"""
    Compute the number of special tokens one image occupies.

    Returns -1 for model types without a known per-image token count.
    """
    model_type = getattr(config, "model_type", None)
    if model_type == "llava":
        vision_cfg = config.vision_config
        seqlen = (vision_cfg.image_size // vision_cfg.patch_size) ** 2
        # The "full" selection strategy keeps the [CLS] token, adding one position.
        if getattr(config, "vision_feature_select_strategy", "default") == "full":
            seqlen += 1
        return seqlen
    if model_type == "paligemma":
        return config.vision_config.num_image_tokens
    return -1


def get_patch_size(config: PretrainedConfig, processor: ProcessorMixin) -> int:
    r"""
    Resolve the ViT patch size, preferring the model config over the processor.
    Returns -1 when neither source defines it.
    """
    fallback = getattr(processor, "patch_size", -1)
    return getattr(config.vision_config, "patch_size", fallback)


def get_vision_feature_select_strategy(config: PretrainedConfig, processor: ProcessorMixin) -> str:
    r"""
    Get the vision_feature_select_strategy.

    Checks the model config first, then the processor, falling back to "default".
    Note: returns a strategy name string (e.g. "default" or "full") — the previous
    `-> int` annotation was wrong.
    """
    return getattr(
        config, "vision_feature_select_strategy", getattr(processor, "vision_feature_select_strategy", "default")
    )


def patch_processor(
    processor: ProcessorMixin,
    config: PretrainedConfig,
    tokenizer: PreTrainedTokenizer,
    model_args,
) -> None:
    """Attach the tokenizer and, for visual models, vision-related settings to the processor in place."""
    processor.tokenizer = tokenizer
    if getattr(config, "vision_config", None) is None:
        # Text-only model: nothing more to attach.
        return
    processor.image_seqlen = get_image_seqlen(config)
    processor.patch_size = get_patch_size(config, processor)
    processor.image_max_pixels = model_args.image_max_pixels
    processor.image_min_pixels = model_args.image_min_pixels
    processor.video_max_pixels = model_args.video_max_pixels
    processor.video_min_pixels = model_args.video_min_pixels
    processor.video_fps = model_args.video_fps
    processor.video_maxlen = model_args.video_maxlen
    processor.vision_feature_select_strategy = get_vision_feature_select_strategy(config, processor)


def patch_config(config):
    """
    Strip deprecated NPU fused-operator settings from the model config.

    Configuring npu fused operators through the config is deprecated and will be
    removed in next versions; `openmind.apply_fused_kernel` is the replacement.
    """
    if getattr(config, "_attn_implementation", None) == "npu_fusion_attention":
        logger.warning_rank0(
            "Currently set `_attn_implementation='npu_fusion_attention'` is not supported, and it will be removed "
            "in next versions. Use the `openmind.apply_fused_kernel` instead."
        )
        config._attn_implementation = "eager"

    if hasattr(config, "use_npu_rms_norm"):
        logger.warning_rank0(
            "Currently set `use_npu_rms_norm=True` is not supported, and it will be removed "
            "in next versions. Use the `openmind.apply_fused_kernel` instead."
        )
        delattr(config, "use_npu_rms_norm")


def disable_internal_format():
    """On NPU, forbid internal tensor formats (fixes illegal format for Conv3DBackpropFilter)."""
    if not is_torch_npu_available():
        return
    torch.npu.config.allow_internal_format = False


def get_model():
    r"""
    Load and prepare the pretrained model described by the global arguments.

    Pipeline: resolve torch_dtype -> choose device placement (inference only)
    -> optional 4-bit NF4 quantization -> optional NPU fused kernels (training)
    -> load via AutoModelForVision2Seq or AutoModelForCausalLM -> apply
    sequence parallelism and adapters -> set train/eval mode.

    Returns:
        The fully prepared model instance.

    Raises:
        ValueError: if a non-int `device` is given without
            `ASCEND_RT_VISIBLE_DEVICES`, or `load_in_4bit` is used without bf16.
    """
    args = get_args()
    init_kwargs = get_init_kwargs()
    config = get_config()

    # Set default torch_dtype using value from config.json (if available);
    # explicit bf16/fp16 flags take precedence over the config value.
    if args.bf16:
        init_kwargs["torch_dtype"] = torch.bfloat16
    elif args.fp16:
        init_kwargs["torch_dtype"] = torch.float16
    else:
        init_kwargs["torch_dtype"] = getattr(config, "torch_dtype", torch.float32)

    # low_cpu_mem_usage conflicts with DeepSpeed ZeRO-3 weight sharding, so disable it there.
    init_kwargs["low_cpu_mem_usage"] = args.low_cpu_mem_usage and (not is_deepspeed_zero3_enabled())

    # For chat/deploy/export, set cards by following three ways:
    # visible-device env -> automatic sharding; "cpu" -> cpu; int index -> that single card.
    if not args.do_train:
        if os.getenv("ASCEND_RT_VISIBLE_DEVICES", None) is not None:
            init_kwargs["device_map"] = "auto"
        elif args.device == "cpu":
            init_kwargs["device_map"] = "cpu"
        else:
            if not isinstance(args.device, int):
                raise ValueError(
                    "param device only support single card. If you want to use mutiple cards to split model weight, please set environment `ASCEND_RT_VISIBLE_DEVICES`."
                )
            else:
                init_kwargs["device_map"] = {"": get_current_device(args.device)}

    if args.load_in_4bit:
        patch_bnb()
        # Only the bf16 compute dtype is supported for bnb 4-bit here.
        if init_kwargs["torch_dtype"] != torch.bfloat16:
            raise ValueError("we only support bnb_4bit_compute_dtype=bf16. Please set parameter `bf16` to True.")
        logger.info_rank0(
            "The torch_dtype is set to torch.bfloat16. Currently support bnb_4bit_compute_dtype is only bf16."
        )

        # NF4 4-bit quantization config, compute dtype pinned to bf16 (validated above).
        nf4_config = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.bfloat16,
        )
    else:
        nf4_config = None

    patch_config(config)
    if args.do_train:
        # Map NPU fused kernels (attention / rms_norm / rope / swiglu) onto this architecture.
        map_fused_kernel_to_model(
            config.architectures,
            use_npu_fusion_attention=args.use_npu_fusion_attention,
            use_fused_rms_norm=args.use_fused_rms_norm,
            use_fused_rope=args.use_fused_rope,
            use_fused_swiglu=args.use_fused_swiglu,
            config=config,
        )

    if type(config) in AutoModelForVision2Seq._model_mapping.keys():  # assume built-in models
        load_class = AutoModelForVision2Seq
        disable_internal_format()

    else:
        load_class = AutoModelForCausalLM

    model = load_class.from_pretrained(
        pretrained_model_name_or_path=args.model_name_or_path,
        config=config,
        quantization_config=nf4_config,
        **init_kwargs,
    )

    # NOTE(review): moves the model to the local-rank device before adapter wrapping —
    # presumably so LoRA/DoRA weight initialization happens on-device; confirm.
    if args.init_lora_weights or args.use_dora:
        model = model.to(get_current_device(os.getenv("LOCAL_RANK", 0)))

    apply_sequence_parallel(args, config)
    model = apply_adapter(model, args.do_train)

    if args.do_train:
        model.train()

        # Gradient checkpointing is skipped for the reward-model stage.
        if args.use_gradient_checkpointing and model.supports_gradient_checkpointing and args.stage != "rm":
            model.gradient_checkpointing_enable()
            logger.info_rank0("Gradient checkpointing has been enabled.")
    else:
        model.eval()

    # Optional parameter dump, printed on rank 0 only.
    if args.print_param_status and int(os.getenv("LOCAL_RANK", "0")) == 0:
        for name, param in model.named_parameters():
            print(f"name: {name}, dtype: {param.dtype}, device: {param.device}, trainable: {param.requires_grad}")

    return model
