# Copyright (c) Alibaba, Inc. and its affiliates.
import os
import platform
import re
from copy import deepcopy
from dataclasses import asdict, dataclass, field
from functools import partial
from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Union

import torch
import transformers
from packaging import version
from peft import PeftModel
from transformers import (AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForSequenceClassification,
                          AutoTokenizer, GenerationConfig, PretrainedConfig, PreTrainedModel, PreTrainedTokenizerBase)
from transformers.integrations import is_deepspeed_zero3_enabled
from transformers.utils import (is_torch_bf16_gpu_available, is_torch_cuda_available, is_torch_mps_available,
                                is_torch_npu_available, strtobool)
from transformers.utils.versions import require_version

from swift.utils import get_dist_setting, get_logger, is_mp, is_unsloth_available, patch_getattr, use_torchacc
from .constant import ModelType
from .patcher import (patch_automodel, patch_automodel_for_sequence_classification, patch_get_dynamic_module,
                      patch_mp_ddp, patch_tp_plan)
from .utils import AttnImpl, HfConfigFactory, ModelInfo, safe_snapshot_download

# Signature shared by every loader registered in MODEL_MAPPING:
# (model_dir, model_info, model_kwargs, load_model, **kwargs) -> (model or None, tokenizer/processor).
GetModelTokenizerFunction = Callable[..., Tuple[Optional[PreTrainedModel], PreTrainedTokenizerBase]]
logger = get_logger()


@dataclass
class Model:
    """A single model entry; at least one of the ids/path is expected to be set."""
    # ModelScope model id.
    ms_model_id: Optional[str] = None
    # HuggingFace model id.
    hf_model_id: Optional[str] = None
    # Local directory containing the model files.
    model_path: Optional[str] = None

    # Optional revisions for the respective hubs.
    ms_revision: Optional[str] = None
    hf_revision: Optional[str] = None


@dataclass
class ModelGroup:
    """A group of related models sharing the same requirements/tags."""
    models: List[Model]

    # Higher priority. If set to None, the attributes of the ModelMeta will be used.
    ignore_patterns: Optional[List[str]] = None
    requires: Optional[List[str]] = None
    tags: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Accept a single Model as a convenience; normalize it to a list.
        if isinstance(self.models, (tuple, list)):
            return
        self.models = [self.models]


@dataclass
class ModelMeta:
    """Registry entry describing one model_type: how to match, load and template it."""
    model_type: Optional[str]
    # Used to list the model_ids from modelscope/huggingface,
    # which participate in the automatic inference of the model_type.
    model_groups: List[ModelGroup]
    template: Optional[str]
    get_function: GetModelTokenizerFunction

    model_arch: Optional[str] = None
    architectures: List[str] = field(default_factory=list)
    # Additional files that need to be saved for full parameter training/merge-lora.
    additional_saved_files: List[str] = field(default_factory=list)
    torch_dtype: Optional[torch.dtype] = None

    is_multimodal: bool = False
    is_reward: bool = False
    task_type: Optional[str] = None

    # File patterns to ignore when downloading the model.
    ignore_patterns: Optional[List[str]] = None
    # Usually specifies the version limits of transformers.
    requires: List[str] = field(default_factory=list)
    tags: List[str] = field(default_factory=list)

    def __post_init__(self):
        # Fall back to the dummy template and normalize model_groups to a list.
        self.template = 'dummy' if self.template is None else self.template
        if not isinstance(self.model_groups, (list, tuple)):
            self.model_groups = [self.model_groups]

    def get_matched_model_group(self, model_name: str) -> Optional[ModelGroup]:
        """Return the first group containing a model whose id/path basename equals `model_name`."""
        id_keys = ('ms_model_id', 'hf_model_id', 'model_path')
        for group in self.model_groups:
            for model in group.models:
                candidates = (getattr(model, key) for key in id_keys)
                if any(
                        isinstance(value, str) and model_name == value.rsplit('/', 1)[-1].lower()
                        for value in candidates):
                    return group
        return None

    def check_requires(self, model_info=None):
        """Warn about pip packages required by this model (and its quant method) that are missing."""
        quant_package_map = {'bnb': ['bitsandbytes'], 'awq': ['autoawq'], 'gptq': ['auto_gptq'], 'aqlm': ['aqlm']}
        needed = list(self.requires)
        if model_info and model_info.quant_method:
            needed += quant_package_map.get(model_info.quant_method, [])
        missing = []
        for require in needed:
            try:
                require_version(require)
            except ImportError:
                missing.append(f'"{require}"')
        if missing:
            missing = ' '.join(missing)
            logger.warning(f'Please install the package: `pip install {missing} -U`.')


MODEL_MAPPING: Dict[str, ModelMeta] = {}


def register_model(model_meta: ModelMeta, *, exist_ok: bool = False) -> None:
    """Register `model_meta` into MODEL_MAPPING keyed by its model_type.

    model_type: The unique ID for the model type. Models with the same model_type share
        the same architectures, template, get_function, etc.
    """
    model_type = model_meta.model_type
    if model_type in MODEL_MAPPING and not exist_ok:
        raise ValueError(f'The `{model_type}` has already been registered in the MODEL_MAPPING.')
    from .constant import MLLMModelType, RMModelType
    # Membership in the multimodal / reward-model constant namespaces flips the flags.
    if model_type in MLLMModelType.__dict__:
        model_meta.is_multimodal = True
    if model_type in RMModelType.__dict__:
        model_meta.is_reward = True
    MODEL_MAPPING[model_type] = model_meta


def load_by_unsloth(args):
    """Load model by unsloth"""
    assert is_unsloth_available(), 'please install unsloth if using `use_unsloth=True`: `pip install unsloth`'
    os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
    os.environ['UNSLOTH_DISABLE_STATISTICS'] = '1'
    model_info, model_meta = args.model_info, args.model_meta
    # Multimodal models need unsloth's vision loader.
    if model_meta.is_multimodal:
        from unsloth import FastVisionModel as UnslothModel
    else:
        from unsloth import FastLanguageModel as UnslothModel
    model, processor = UnslothModel.from_pretrained(
        model_name=args.adapters and args.adapters[0] or args.model_dir,
        dtype=args.torch_dtype,
        max_seq_length=args.max_length,
        full_finetuning=args.quant_bits is None,
        load_in_4bit=args.quant_bits == 4,
        load_in_8bit=args.quant_bits == 8,
    )
    # Attach swift metadata to the underlying base model and the processor.
    base_model = model.model if isinstance(model, PeftModel) else model
    base_model.model_dir = args.model_dir
    for target in (base_model, processor):
        target.model_info = model_info
        target.model_meta = model_meta
    return model, processor


def _patch_awq_compat(model_info):
    """Patch AwqQuantizer so autoawq checkpoints keep loading on transformers>=4.50."""
    is_new_transformers = version.parse(transformers.__version__) >= version.parse('4.50')
    if not (is_new_transformers and model_info.quant_method == 'awq'):
        return

    try:
        # compat transformers>=4.50 (autoawq)
        from transformers.quantizers.quantizer_awq import AwqQuantizer
        from transformers.integrations import get_keys_to_not_convert
        origin_process = AwqQuantizer._process_model_before_weight_loading

        def patched_process(self, model, *args, **kwargs):
            modules_to_not_convert = self.quantization_config.modules_to_not_convert
            if modules_to_not_convert is not None:
                # Also keep the modules transformers itself would exclude from quantization.
                self.quantization_config.modules_to_not_convert = list(
                    modules_to_not_convert) + get_keys_to_not_convert(model)
            return origin_process(self, model, *args, **kwargs)

        AwqQuantizer._process_model_before_weight_loading = patched_process
    except Exception:
        # Best effort: if transformers internals moved, silently skip the patch.
        pass


def get_model_tokenizer_from_local(model_dir: str,
                                   model_info: ModelInfo,
                                   model_kwargs: Dict[str, Any],
                                   load_model: bool = True,
                                   *,
                                   tokenizer=None,
                                   model_config=None,
                                   automodel_class=None,
                                   **kwargs):
    """Load the model and tokenizer from the local model_dir.

    Args:
        model_dir: Local directory with config/tokenizer/weight files.
        model_info: Parsed model info; its torch_dtype/task_type/num_labels drive loading.
        model_kwargs: Extra kwargs forwarded to `from_pretrained` (device_map, quantization, ...).
        load_model: When False, only config/tokenizer are loaded and the returned model is None.
        tokenizer: Pre-built tokenizer; loaded via AutoTokenizer when None.
        model_config: Pre-built config; loaded via AutoConfig when None.
        automodel_class: AutoModel-like class to use; defaults to AutoModelForCausalLM.
        **kwargs: Must contain 'model_meta'; may contain 'rope_scaling'.

    Returns:
        Tuple of (model or None, tokenizer).
    """
    if model_config is None:
        model_config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
    # fix prediction_step (internvl2, ovis, ...)
    if not hasattr(model_config, 'keys_to_ignore_at_inference'):
        model_config.keys_to_ignore_at_inference = []
    if 'past_key_values' not in model_config.keys_to_ignore_at_inference:
        model_config.keys_to_ignore_at_inference.append('past_key_values')

    torch_dtype = model_info.torch_dtype
    model_config.torch_dtype = torch_dtype
    HfConfigFactory.compat_zero3(model_config)
    # Override rope_scaling in the config when explicitly requested by the caller.
    rope_scaling = kwargs.get('rope_scaling')
    if rope_scaling:
        HfConfigFactory.set_config_attr(model_config, 'rope_scaling', rope_scaling)

    if tokenizer is None:
        tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)

    # Propagate num_labels (from model_info, falling back to the config) for seq_cls.
    num_labels = model_info.num_labels or getattr(model_config, 'num_labels', None)
    if num_labels and model_info.task_type == 'seq_cls':
        model_info.num_labels = num_labels
        model_config.num_labels = num_labels

    model = None
    if load_model:
        _patch_awq_compat(model_info)
        logger.info(f'model_kwargs: {model_kwargs}')
        # fix seq_cls: first try the native sequence-classification auto class;
        # a ValueError means this architecture has no seq-cls head mapping.
        if model_info.task_type == 'seq_cls' and automodel_class is None:
            try:
                model = AutoModelForSequenceClassification.from_pretrained(
                    model_dir, config=model_config, torch_dtype=torch_dtype, trust_remote_code=True, **model_kwargs)
            except ValueError:
                model = None

        automodel_class = automodel_class or AutoModelForCausalLM
        model_meta = kwargs['model_meta']
        if model is None:
            # Choose the patch context: wrap the causal model with a seq-cls head when needed.
            if model_info.task_type == 'seq_cls' and not model_meta.is_reward:
                context = partial(patch_automodel_for_sequence_classification, model_meta=model_meta)
            elif model_info.task_type == 'seq_cls' and model_meta.is_reward and model_config.num_labels > 1:
                logger.warning('You are using a reward model for seq_cls task and num_labels > 1, '
                               'ignore_mismatched_sizes will be set to True')
                model_kwargs['ignore_mismatched_sizes'] = True
                context = partial(patch_automodel_for_sequence_classification, model_meta=model_meta)
            else:
                context = partial(patch_automodel, automodel_class=automodel_class, model_info=model_info)
            with context():
                model = automodel_class.from_pretrained(
                    model_dir, config=model_config, torch_dtype=torch_dtype, trust_remote_code=True, **model_kwargs)

        # fix not save modeling_xxx.py (transformers 4.45)
        # https://github.com/huggingface/transformers/issues/24737
        has_remote_code = hasattr(model_config, 'auto_map') and automodel_class.__name__ in model_config.auto_map
        if has_remote_code and model._auto_class is None:
            model._auto_class = automodel_class.__name__

        # Embedding task: normalize the model outputs unless a plain AutoModel was used.
        if model_info.task_type == 'embedding' and automodel_class.__name__ != 'AutoModel':
            from swift.llm.model.patcher import patch_output_normalizer
            patch_output_normalizer(model, model_meta=model_meta)

    model_info.config = model_config if model is None else model.config
    if model:
        # fix seq classification task: ensure pad_token_id is set on the model config.
        pad_token_id = model.config.pad_token_id or tokenizer.pad_token_id
        HfConfigFactory.set_model_config_attr(model, 'pad_token_id', pad_token_id)
    return model, tokenizer


def get_model_tokenizer_with_flash_attn(model_dir: str,
                                        model_info: ModelInfo,
                                        model_kwargs: Dict[str, Any],
                                        load_model: bool = True,
                                        **kwargs):
    """Like `get_model_tokenizer_from_local`, but resolves the attention backend first."""
    config = kwargs.get('model_config')
    if config is None:
        config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
    # Apply the requested attention implementation (flash_attn / sdpa / eager) to the config.
    AttnImpl.update_attn_impl(config, kwargs.get('attn_impl'), kwargs.get('attn_impl_keys'))
    kwargs['model_config'] = config
    return get_model_tokenizer_from_local(model_dir, model_info, model_kwargs, load_model, **kwargs)


def get_model_tokenizer_multimodal(model_dir: str, *args, **kwargs):
    """Load a multimodal model together with its AutoProcessor (which wraps the tokenizer)."""
    from transformers import AutoProcessor
    processor = AutoProcessor.from_pretrained(model_dir, trust_remote_code=True)
    # Reuse the processor's tokenizer so it is not loaded a second time.
    kwargs['tokenizer'] = processor.tokenizer
    model = get_model_tokenizer_with_flash_attn(model_dir, *args, **kwargs)[0]
    return model, processor


def get_model_tokenizer_reward_model(model_dir, *args, **kwargs):
    """Load a reward model, preferring the remote-code `AutoModel` class when the config maps one."""
    model_config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
    auto_map = getattr(model_config, 'auto_map', None) or {}
    if 'AutoModel' in auto_map:
        kwargs['automodel_class'] = AutoModel
    return get_model_tokenizer_with_flash_attn(model_dir, *args, **kwargs)


def fix_do_sample_warning(generation_config: GenerationConfig) -> None:
    """Mutate `generation_config` in place so transformers' do_sample warnings are silenced."""
    # A temperature of 0 is treated as greedy decoding.
    if generation_config.temperature == 0:
        generation_config.do_sample = False
    # With sampling off, reset the sampling knobs to their library defaults.
    if generation_config.do_sample is False:
        for attr, default in (('temperature', 1.), ('top_p', 1.), ('top_k', 50)):
            setattr(generation_config, attr, default)


def get_default_device_map():
    """Pick a sensible default device_map for the current hardware/launch environment."""
    # DeepSpeed ZeRO-3 and FSDP manage parameter placement themselves.
    if is_deepspeed_zero3_enabled() or os.environ.get('ACCELERATE_USE_FSDP', 'False') == 'true':
        return None
    local_rank = get_dist_setting()[1]
    if local_rank == -1:
        local_rank = 0
    if is_torch_npu_available():
        return 'auto' if is_mp() else f'npu:{local_rank}'
    if is_torch_mps_available():
        return f'mps:{local_rank}'
    if is_torch_cuda_available():
        return 'auto' if is_mp() else f'cuda:{local_rank}'
    return 'cpu'


def get_default_torch_dtype(torch_dtype: Optional[torch.dtype]):
    """Return `torch_dtype` when given, otherwise pick a default for the current hardware.

    Args:
        torch_dtype: torch_dtype read from config.json (may be None).

    Returns:
        The given dtype unchanged, bfloat16/float16 on CUDA/NPU (depending on
        bf16 support), or float32 on CPU.
    """
    # torch_dtype: torch_dtype in config.json
    if torch_dtype is not None:
        return torch_dtype

    try:
        is_bf16_available = is_torch_bf16_gpu_available() or (is_torch_npu_available()
                                                              and torch.npu.is_bf16_supported())
    except Exception:
        # Probing bf16 support can fail on exotic builds; fall back to fp16.
        # Was a bare `except:`, which also swallowed KeyboardInterrupt/SystemExit.
        is_bf16_available = False

    if is_torch_cuda_available() or is_torch_npu_available():
        if is_bf16_available:
            return torch.bfloat16
        else:
            return torch.float16
    else:
        # cpu
        return torch.float32


def get_model_name(model_id_or_path: str) -> Optional[str]:
    """Extract a short model name from a model id or path.

    Handles Hugging Face cache snapshot paths (``models--org--name/snapshots/...``),
    plain hub ids (``org/name``), local paths, and ModelScope snapshot directories
    where ``___`` encodes a dot.

    Args:
        model_id_or_path: Model id (e.g. 'baichuan-inc/Baichuan2-7B') or a local path.

    Returns:
        The extracted model name (e.g. 'Baichuan2-7B').
    """
    assert isinstance(model_id_or_path, str), f'model_id_or_path: {model_id_or_path}'
    normalized = model_id_or_path.rstrip('/')

    # Hugging Face hub cache layout: .../models--<org>--<name>/snapshots/<hash>
    cache_match = re.search('/models--.+?--(.+?)/snapshots/', normalized)
    if cache_match is not None:
        return cache_match.group(1)

    # Otherwise take the last path segment.
    model_name = normalized.rsplit('/', 1)[-1]
    # On Windows the separator may be a backslash.
    if platform.system().lower() == 'windows':
        model_name = model_name.rsplit('\\', 1)[-1]
    # ModelScope snapshot downloads encode '.' as '___'.
    return model_name.replace('___', '.')


def get_all_models() -> List[str]:
    """List all registered model ids for the active hub (HF when USE_HF is set, else ModelScope)."""
    use_hf = strtobool(os.environ.get('USE_HF', 'False'))
    id_attr = 'hf_model_id' if use_hf else 'ms_model_id'
    models = []
    for model_type in ModelType.get_model_name_list():
        model_meta = MODEL_MAPPING.get(model_type)
        if not model_meta:
            continue
        for group in model_meta.model_groups:
            for model in group.models:
                model_id = getattr(model, id_attr)
                if model_id:
                    models.append(model_id)
    return models


def get_matched_model_meta(model_id_or_path: str) -> Optional[ModelMeta]:
    """Find the registered ModelMeta matching a model id or path.

    The model name is extracted from `model_id_or_path` and looked up in
    MODEL_MAPPING. When a model group matches, a deep copy of its ModelMeta is
    returned with the group's non-None attributes merged in, so the global
    registry is never mutated.

    Args:
        model_id_or_path: Model id or path (e.g. 'baichuan-inc/Baichuan2-7B-Chat').

    Returns:
        The merged ModelMeta, or None when nothing matches.
    """
    model_name = get_model_name(model_id_or_path).lower()
    for model_meta in MODEL_MAPPING.values():
        model_group = model_meta.get_matched_model_group(model_name)
        if model_group is None:
            continue
        # Copy before merging so the registry entry stays untouched.
        matched_meta = deepcopy(model_meta)
        # Group attributes (when set) take precedence over the meta's defaults.
        for key, value in asdict(model_group).items():
            if value is not None and key in matched_meta.__dict__:
                setattr(matched_meta, key, value)
        return matched_meta
    return None


def _get_arch_mapping():
    """Build a mapping from architecture name to the list of matching model_types.

    Metas without declared architectures are grouped under the 'null' key.

    Returns:
        Dict[str, List[str]]: architecture -> model_types.
    """
    res = {}
    for model_type, model_meta in MODEL_MAPPING.items():
        # Use a fallback instead of appending 'null' to model_meta.architectures:
        # the old code mutated the shared list inside the global registry.
        architectures = model_meta.architectures or ['null']
        for arch in architectures:
            res.setdefault(arch, []).append(model_type)
    return res


def get_matched_model_types(architectures: Optional[List[str]]) -> List[str]:
    """Get possible model_type candidates for the first architecture name.

    Args:
        architectures: Architecture names from the config; only the first entry
            is used. None/empty is treated as the 'null' placeholder.

    Returns:
        The matching model_type list, or [] when none match.
    """
    # `architectures or ['null']` is always non-empty, so the old
    # `if architectures:` guard around the [0] access was dead code.
    arch = (architectures or ['null'])[0]
    return _get_arch_mapping().get(arch) or []


def _read_args_json_model_type(model_dir):
    if not os.path.exists(os.path.join(model_dir, 'args.json')):
        return
    from swift.llm import BaseArguments
    args = BaseArguments.from_pretrained(model_dir)
    return args.model_type


def _get_model_info(model_dir: str, model_type: Optional[str], quantization_config) -> ModelInfo:
    """Parse the local config and wrap the result into a ModelInfo.

    Args:
        model_dir: Local model directory or path.
        model_type: Model type; inferred from args.json / architectures when None.
        quantization_config: Optional quantization config to inject into the config.

    Returns:
        ModelInfo carrying model_type, directory, dtype, max length, quant info
        and rope_scaling.
    """
    # Prefer the full AutoConfig (supports remote code); fall back to the raw config dict.
    try:
        config = AutoConfig.from_pretrained(model_dir, trust_remote_code=True)
    except Exception:
        config = PretrainedConfig.get_config_dict(model_dir)[0]

    if quantization_config is not None:
        config = config
        HfConfigFactory.set_config_attr(config, 'quantization_config', quantization_config)

    # Derive quantization, dtype, max length and rope_scaling from the config.
    quant_info = HfConfigFactory.get_quant_info(config) or {}
    torch_dtype = HfConfigFactory.get_torch_dtype(config, quant_info)
    max_model_len = HfConfigFactory.get_max_model_len(config)
    rope_scaling = HfConfigFactory.get_config_attr(config, 'rope_scaling')

    if model_type is None:
        # First try the args.json written by swift training runs.
        model_type = _read_args_json_model_type(model_dir)

    if model_type is None:
        # Then infer it from the architectures declared in the config.
        architectures = HfConfigFactory.get_config_attr(config, 'architectures')
        model_types = get_matched_model_types(architectures)
        if len(model_types) > 1:
            # Ambiguous: the user must choose explicitly.
            raise ValueError('Please explicitly pass the model_type. For reference, '
                             f'the available model_types: {model_types}.')
        elif len(model_types) == 1:
            model_type = model_types[0]
    elif model_type not in MODEL_MAPPING:
        # An explicitly supplied (or args.json-provided) model_type must be registered.
        raise ValueError(f"model_type: '{model_type}' not in {list(MODEL_MAPPING.keys())}")

    return ModelInfo(
        model_type,
        model_dir,
        torch_dtype,
        max_model_len,
        quant_info.get('quant_method'),
        quant_info.get('quant_bits'),
        rope_scaling=rope_scaling)


def get_model_info_meta(
        model_id_or_path: str,
        torch_dtype: Optional[torch.dtype] = None,
        *,
        # hub kwargs
        use_hf: Optional[bool] = None,
        hub_token: Optional[str] = None,
        revision: Optional[str] = None,
        download_model: bool = False,
        # model kwargs
        model_type: Optional[str] = None,
        quantization_config=None,
        task_type=None,
        num_labels=None,
        **kwargs) -> Tuple[ModelInfo, ModelMeta]:
    """Unified entry point resolving both ModelInfo and ModelMeta for a model.

    Combines the registered metadata with the local/remote config to determine
    the model_type, torch_dtype and task_type (e.g. 'causal_lm' or 'seq_cls'),
    optionally downloading the model first.

    Args:
        model_id_or_path: Model name or path (local directory or hub id).
        torch_dtype: Desired dtype; inferred from meta/config/hardware when None.
        use_hf: Download from HuggingFace when True, otherwise ModelScope.
        hub_token: Token for accessing private models.
        revision: Model revision to fetch.
        download_model: Download weights (False: only resolve metadata/config).
        model_type: Explicit model type (e.g. 'llama'); inferred when None.
        quantization_config: Quantization settings (e.g. bits, group size).
        task_type: Task type ('causal_lm', 'seq_cls', ...).
        num_labels: Number of labels for classification tasks.
        kwargs: Extra arguments (accepted for interface compatibility).

    Returns:
        Tuple[ModelInfo, ModelMeta]: the parsed model info and its metadata.
    """

    # Match the registered ModelMeta by model name (may be None for unknown models).
    model_meta = get_matched_model_meta(model_id_or_path)

    # Resolve/download the model directory (HuggingFace, ModelScope or local path).
    model_dir = safe_snapshot_download(
        model_id_or_path,
        revision=revision,
        download_model=download_model,
        use_hf=use_hf,
        ignore_patterns=getattr(model_meta, 'ignore_patterns', None),
        hub_token=hub_token)

    # Parse config-derived details: model_type, torch_dtype, quantization, etc.
    model_info = _get_model_info(model_dir, model_type, quantization_config=quantization_config)

    # When not explicitly provided, adopt the model_type inferred from the config.
    if model_type is None and model_info.model_type is not None:
        model_type = model_info.model_type
        logger.info(f'Setting model_type: {model_type}')

    # Name lookup failed but the type is known: fetch the meta from the registry.
    if model_meta is None and model_type is not None:
        model_meta = MODEL_MAPPING[model_type]

    # Still unknown: build a minimal dummy meta so loading can proceed.
    if model_meta is None:
        model_meta = ModelMeta(None, [], 'dummy', get_model_tokenizer_from_local, model_arch=None)
        logger.info(f'Temporarily create model_meta: {model_meta}')

    # Pick a torch dtype: explicit arg > meta default > hardware-based default.
    if torch_dtype is None:
        torch_dtype = model_meta.torch_dtype or get_default_torch_dtype(model_info.torch_dtype)
        logger.info(f'Setting torch_dtype: {torch_dtype}')
    model_info.torch_dtype = torch_dtype

    # Infer the task type when not given.
    if task_type is None:
        # Reward models are treated as single-label (regression-style) classifiers.
        if model_meta.is_reward:
            num_labels = 1
        # No labels -> causal_lm; labels present -> seq_cls (classification).
        if num_labels is None:
            task_type = 'causal_lm'
        else:
            task_type = 'seq_cls'
        # Classification requires a label count.
        if task_type == 'seq_cls':
            assert num_labels is not None, 'Please pass the parameter `num_labels`.'
        # A task_type declared on the meta takes precedence.
        if model_meta.task_type is not None:
            task_type = model_meta.task_type

    model_info.task_type = task_type
    model_info.num_labels = num_labels

    # Warn about pip packages required by this model/quantization that are missing.
    model_meta.check_requires(model_info)

    return model_info, model_meta



def get_model_tokenizer(
        model_id_or_path: str,  # model id (e.g. 'bert-base-uncased') or local path
        torch_dtype: Optional[torch.dtype] = None,  # weight dtype (e.g. torch.float16)
        device_map: Union[str, Dict[str, Any], None] = None,  # device placement strategy
        *,
        load_model: bool = True,  # whether to load the model weights
        # hub options
        use_hf: Optional[bool] = None,  # use HuggingFace Hub (otherwise ModelScope)
        hub_token: Optional[str] = None,  # API token for private models
        revision: Optional[str] = None,  # git revision (branch/tag)
        download_model: Optional[bool] = None,  # whether to download model weights
        # architecture options
        model_type: Optional[str] = None,  # explicit model type (when auto-detection fails)
        quantization_config=None,  # quantization settings (e.g. 4-bit)
        max_memory: Union[str, Dict[str, Any]] = None,  # GPU memory limits
        attn_impl: Literal['flash_attn', 'sdpa', 'eager', None] = None,  # attention implementation
        rope_scaling: Optional[Dict[str, Any]] = None,  # RoPE scaling config
        automodel_class=None,  # custom AutoModel class
        task_type: Literal['causal_lm', 'seq_cls'] = None,  # downstream task type
        num_labels: Optional[int] = None,  # number of labels for classification
        model_kwargs: Optional[Dict[str, Any]] = None,  # extra model loading kwargs
        **kwargs) -> Tuple[Optional[PreTrainedModel], PreTrainedTokenizerBase]:
    """Load a pretrained model and tokenizer from HuggingFace Hub or ModelScope.

    Handles device placement, quantization and attention configuration, and
    attaches `model_info`/`model_meta` to both the model and the tokenizer.
    """

    # Compatibility patch for multi-process / distributed (DDP) setups.
    patch_mp_ddp()

    # Initialize the model kwargs dict when not provided.
    if model_kwargs is None:
        model_kwargs = {}

    # By default only download weights when the model will actually be loaded.
    if download_model is None:
        download_model = load_model

    # Resolve model metadata and config-derived info.
    model_info, model_meta = get_model_info_meta(
        model_id_or_path,
        torch_dtype,
        use_hf=use_hf,
        hub_token=hub_token,
        revision=revision,
        download_model=download_model,
        model_type=model_type,
        quantization_config=quantization_config,
        task_type=task_type,
        num_labels=num_labels)

    # Pick a default device_map unless TorchAcc manages placement itself.
    if not use_torchacc() and device_map is None:
        device_map = get_default_device_map()
    model_kwargs['device_map'] = device_map

    # Apply quantization config when provided.
    if quantization_config:
        model_kwargs['quantization_config'] = quantization_config

    # Apply per-device memory limits when provided.
    if max_memory:
        model_kwargs['max_memory'] = max_memory

    # The per-model-type loader function registered in the meta.
    model_dir = model_info.model_dir
    get_function = model_meta.get_function

    # Forward loader options through kwargs.
    kwargs['automodel_class'] = automodel_class
    kwargs['attn_impl'] = attn_impl
    kwargs['rope_scaling'] = rope_scaling
    kwargs['model_meta'] = model_meta

    # Load the model and processor with dynamic-module / TP-plan patches applied.
    with patch_get_dynamic_module(), patch_tp_plan():
        model, processor = get_function(model_dir, model_info, model_kwargs, load_model, **kwargs)

    # The processor may wrap the tokenizer (multimodal); extract the real one.
    if not isinstance(processor, PreTrainedTokenizerBase) and hasattr(processor, 'tokenizer'):
        tokenizer = processor.tokenizer
        patch_getattr(processor.__class__, 'tokenizer')  # delegate attribute access to the tokenizer
    else:
        tokenizer = processor

    # Configure the problem type (used by classification tasks).
    problem_type = kwargs.get('problem_type')
    if problem_type is None and model_info.num_labels == 1:
        problem_type = 'regression'  # a single label defaults to regression
    if problem_type is not None:
        model_info.config.problem_type = problem_type

    # Attach model metadata to the tokenizer.
    tokenizer.model_info = model_info
    tokenizer.model_meta = model_meta

    # Ensure the tokenizer has the required special tokens (pad/eos fall back to each other).
    pad_token = tokenizer.pad_token_id
    if pad_token is None:
        pad_token = tokenizer.eos_token_id
    if tokenizer.eos_token_id is None:
        tokenizer.eos_token_id = pad_token
    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = pad_token
    assert tokenizer.eos_token_id is not None  # at least one terminator must exist
    assert tokenizer.pad_token_id is not None  # a padding token must exist

    # Post-process the model object (when one was loaded).
    if model is not None:
        # Attach model metadata.
        model.model_info = model_info
        model.model_meta = model_meta
        model.model_dir = model_dir

        # Load generation_config.json when the model object lacks a generation config.
        generation_config_path = os.path.join(model_dir, 'generation_config.json')
        if not hasattr(model, 'generation_config') and os.path.isfile(generation_config_path):
            model.generation_config = GenerationConfig.from_pretrained(model_dir)

        # Fix do_sample warnings for models such as Llama2.
        if getattr(model, 'generation_config', None):
            fix_do_sample_warning(model.generation_config)

    return model, processor  # processor may be the tokenizer itself or a wrapper around it