Dataset columns:
- repo_id: string (length 15–86)
- file_path: string (length 27–180)
- content: string (length 1–1.75M)
- __index_level_0__: int64 (0–0)
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/prefix_tuning.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from dataclasses import dataclass, field

import torch

from ..config import PromptLearningConfig
from ..utils import PeftType


@dataclass
class PrefixTuningConfig(PromptLearningConfig):
    """
    This is the configuration class to store the configuration of a [`PrefixEncoder`].

    Args:
        encoder_hidden_size (`int`): The hidden size of the prompt encoder.
        prefix_projection (`bool`): Whether to project the prefix embeddings.
    """

    encoder_hidden_size: int = field(
        default=None,
        metadata={"help": "The hidden size of the encoder"},
    )
    prefix_projection: bool = field(
        default=False,
        metadata={"help": "Whether to project the prefix tokens"},
    )

    def __post_init__(self):
        self.peft_type = PeftType.PREFIX_TUNING


# Based on https://github.com/THUDM/P-tuning-v2/blob/main/model/prefix_encoder.py
# with some refactor
class PrefixEncoder(torch.nn.Module):
    r"""
    The `torch.nn` model to encode the prefix.

    Args:
        config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.

    Example:

    ```py
    >>> from peft import PrefixEncoder, PrefixTuningConfig

    >>> config = PrefixTuningConfig(
    ...     peft_type="PREFIX_TUNING",
    ...     task_type="SEQ_2_SEQ_LM",
    ...     num_virtual_tokens=20,
    ...     token_dim=768,
    ...     num_transformer_submodules=1,
    ...     num_attention_heads=12,
    ...     num_layers=12,
    ...     encoder_hidden_size=768,
    ... )
    >>> prefix_encoder = PrefixEncoder(config)
    ```

    **Attributes**:
        - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.
        - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if
          `prefix_projection` is `True`.
        - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.

    Input shape: (`batch_size`, `num_virtual_tokens`)

    Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)
    """

    def __init__(self, config):
        super().__init__()
        self.prefix_projection = config.prefix_projection
        token_dim = config.token_dim
        num_layers = config.num_layers
        encoder_hidden_size = config.encoder_hidden_size
        num_virtual_tokens = config.num_virtual_tokens
        if self.prefix_projection and not config.inference_mode:
            # Use a two-layer MLP to encode the prefix
            self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)
            self.transform = torch.nn.Sequential(
                torch.nn.Linear(token_dim, encoder_hidden_size),
                torch.nn.Tanh(),
                torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),
            )
        else:
            self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)

    def forward(self, prefix: torch.Tensor):
        if self.prefix_projection:
            prefix_tokens = self.embedding(prefix)
            past_key_values = self.transform(prefix_tokens)
        else:
            past_key_values = self.embedding(prefix)
        return past_key_values
0
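A minimal usage sketch for the file above (not part of the original source): it reuses the hyperparameters from the docstring example and keeps the default prefix_projection=False, so the encoder is a single embedding table that maps each virtual token to a vector of size num_layers * 2 * token_dim.

```py
import torch
from peft import PrefixEncoder, PrefixTuningConfig

# Same hyperparameters as the docstring example above.
config = PrefixTuningConfig(
    peft_type="PREFIX_TUNING",
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    num_attention_heads=12,
    num_layers=12,
    encoder_hidden_size=768,
)
prefix_encoder = PrefixEncoder(config)

# One row of virtual-token indices per batch element.
batch_size = 2
prefix = torch.arange(config.num_virtual_tokens).unsqueeze(0).expand(batch_size, -1)
past_key_values = prefix_encoder(prefix)
print(past_key_values.shape)  # torch.Size([2, 20, 18432]) == (batch, tokens, 2 * num_layers * token_dim)
```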
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/prompt_tuning.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum import math from dataclasses import dataclass, field from typing import Optional, Union import torch from ..config import PromptLearningConfig from ..utils import PeftType class PromptTuningInit(str, enum.Enum): TEXT = "TEXT" RANDOM = "RANDOM" @dataclass class PromptTuningConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEmbedding`]. Args: prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding. prompt_tuning_init_text (`str`, *optional*): The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`. tokenizer_name_or_path (`str`, *optional*): The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`. """ prompt_tuning_init: Union[PromptTuningInit, str] = field( default=PromptTuningInit.RANDOM, metadata={"help": "How to initialize the prompt tuning parameters"}, ) prompt_tuning_init_text: Optional[str] = field( default=None, metadata={ "help": "The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={ "help": "The tokenizer to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`" }, ) def __post_init__(self): self.peft_type = PeftType.PROMPT_TUNING class PromptEmbedding(torch.nn.Module): """ The model to encode virtual tokens into prompt embeddings. Args: config ([`PromptTuningConfig`]): The configuration of the prompt embedding. word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model. **Attributes**: - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding. Example: ```py >>> from peft import PromptEmbedding, PromptTuningConfig >>> config = PromptTuningConfig( ... peft_type="PROMPT_TUNING", ... task_type="SEQ_2_SEQ_LM", ... num_virtual_tokens=20, ... token_dim=768, ... num_transformer_submodules=1, ... num_attention_heads=12, ... num_layers=12, ... prompt_tuning_init="TEXT", ... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral", ... tokenizer_name_or_path="t5-base", ... 
) >>> # t5_model.shared is the word embeddings of the base model >>> prompt_embedding = PromptEmbedding(config, t5_model.shared) ``` Input Shape: (`batch_size`, `total_virtual_tokens`) Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`) """ def __init__(self, config, word_embeddings): super().__init__() total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim) if config.prompt_tuning_init == PromptTuningInit.TEXT: from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path) init_text = config.prompt_tuning_init_text init_token_ids = tokenizer(init_text)["input_ids"] # Trim or iterate until num_text_tokens matches total_virtual_tokens num_text_tokens = len(init_token_ids) if num_text_tokens > total_virtual_tokens: init_token_ids = init_token_ids[:total_virtual_tokens] elif num_text_tokens < total_virtual_tokens: num_reps = math.ceil(total_virtual_tokens / num_text_tokens) init_token_ids = init_token_ids * num_reps init_token_ids = init_token_ids[:total_virtual_tokens] word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone() word_embedding_weights = word_embedding_weights.to(torch.float32) self.embedding.weight = torch.nn.Parameter(word_embedding_weights) def forward(self, indices): # Just get embeddings prompt_embeddings = self.embedding(indices) return prompt_embeddings
0
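A hedged sketch of PromptEmbedding with random initialization, not part of the original source: the torch.nn.Embedding below is only a stand-in for the base model's word embeddings (t5_model.shared in the docstring) and is never consulted unless prompt_tuning_init="TEXT".

```py
import torch
from peft import PromptEmbedding, PromptTuningConfig

config = PromptTuningConfig(
    peft_type="PROMPT_TUNING",
    task_type="SEQ_2_SEQ_LM",
    num_virtual_tokens=20,
    token_dim=768,
    num_transformer_submodules=1,
    num_attention_heads=12,
    num_layers=12,
    prompt_tuning_init="RANDOM",  # avoids needing a tokenizer or init text
)

# Stand-in for the base model's word embeddings; vocab size is arbitrary here.
word_embeddings = torch.nn.Embedding(32128, config.token_dim)

prompt_embedding = PromptEmbedding(config, word_embeddings)
indices = torch.arange(config.num_virtual_tokens).unsqueeze(0)  # (batch_size=1, total_virtual_tokens)
out = prompt_embedding(indices)
print(out.shape)  # torch.Size([1, 20, 768])
```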
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/tuners/tuners_utils.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import logging from abc import ABC, abstractmethod from typing import Any, Union from torch import nn from ..config import PeftConfig from ..utils import _get_submodules logger = logging.getLogger(__name__) class BaseTuner(nn.Module, ABC): r""" A base tuner model that provides the common methods and attributes for all tuners that are injectable into a torch.nn.Module For adding a new Tuner class, one needs to overwrite the following methods: - **_prepare_adapter_config**: A private method to eventually prepare the adapter config, for example in case the field `target_modules` is missing. - **_check_target_module_exists**: A helper private method to check if the passed module's key name matches any of the target modules in the adatper_config. - **_create_and_replace**: A private method to create and replace the target module with the adapter module. - **_check_target_module_exists**: A private helper method to check if the passed module's key name matches any of the target modules in the adatper_config. The easiest is to check what is done in the `peft.tuners.lora.LoraModel` class. Attributes: model (`torch.nn.Module`): The model to which the adapter tuner layers will be attached. forward (`Callable`): The forward method of the model. peft_config (`Union[`PeftConfig`, dict[str, PeftConfig]]`): The adapter configuration object, it should be a dictionary of `str` to `PeftConfig` objects. One can also pass a PeftConfig object and a new adapter will be created with the default name `adapter` or create a new dictionary with a key `adapter_name` and a value of that peft config. config (`dict[str, Any]`): The model configuration object, it should be a dictionary of `str` to `Any` objects. """ def __init__(self, model, peft_config: Union[PeftConfig, dict[str, PeftConfig]], adapter_name: str) -> None: super().__init__() self.model = model # For advanced developpers, if you want to attach multiple adapters to your # model, just add a `peft_config` dict attribute to your model. if not hasattr(self, "peft_config"): self.peft_config = {adapter_name: peft_config} if isinstance(peft_config, PeftConfig) else peft_config else: logger.info( "Already found a `peft_config` attribute in the model. This will lead to having multiple adapters" " in the model. Make sure to know what you are doing!" ) if isinstance(peft_config, PeftConfig): self.peft_config[adapter_name] = peft_config else: # user is adding a dict of PeftConfigs self.peft_config.update(peft_config) # transformers models have a .config attribute, whose presence is assumed later on if not hasattr(self, "config"): self.config = {"model_type": "custom"} self.inject_adapter(self.model, adapter_name) # Copy the peft_config in the injected model. 
self.model.peft_config = self.peft_config def forward(self, *args: Any, **kwargs: Any): return self.model.forward(*args, **kwargs) @abstractmethod def _prepare_adapter_config(self, peft_config: PeftConfig, model_config: dict) -> PeftConfig: r""" A private method to eventually prepare the adapter config. For transformers based models, if `peft_config.target_modules` is None, we can automatically infer the target modules from the `TRANSFORMERS_MODELS_TO_XXX_TARGET_MODULES_MAPPING`. This method can be further refactored in the future to automatically infer it for all tuner models. Check out `peft.tuner.lora.LoraModel._prepare_adapter_config` for an example. Args: peft_config (`str`): The adapter config. model_config (`str`): The transformers model config, that config should contain the `model_type` key. """ ... @abstractmethod def _check_target_module_exists(peft_config: PeftConfig, key: str) -> bool: r""" A helper private method to check if the passed module's key name matches any of the target modules in the `peft_config.target_modules` list. If it does, return `True`, else return `False`. Args: peft_config (`PeftConfig`): The adapter config. key (`str`): The module's key name. """ ... @abstractmethod def _create_and_replace( self, peft_config: PeftConfig, adapter_name: str, target: nn.Module, target_name: str, parent: nn.Module, **optionnal_kwargs: Any, ) -> None: r""" Inplace replacement of the target module with the adapter layer. This method needs to be overriden by all the tuner classes. Check `peft.tuners.lora.LoraModel._create_and_replace` for an example. Args: peft_config (`PeftConfig`): The adapter config. adapter_name (`str`): The adapter name. target (`nn.Module`): The target module. target_name (`str`): The target module's name. parent (`nn.Module`): The parent module. **optionnal_kwargs (`dict`): The optional keyword arguments to pass to deal with particular cases (e.g. 8bit, 4bit quantization) """ ... @abstractmethod def _mark_only_adapters_as_trainable(self): r""" A helper method to mark only the adapter layers as trainable (i.e. module.requires_grad = False) This needs to be overriden for all tuner classes to match the correct key names. Check `peft.tuners.lora.LoraModel._mark_only_adapters_as_trainable` for an example. """ ... def _check_new_adapter_config(self, config: PeftConfig) -> None: """ A helper method to check the config when a new adapter is being added. Raise a ValueError if there is something wrong with the config or if it conflicts with existing adapters. """ pass def inject_adapter(self, model: nn.Module, adapter_name: str): r""" Creates adapter layers and replaces the target modules with the adapter layers. This method is called under the hood by `peft.mapping.get_peft_model` if a non-prompt tuning adapter class is passed. The corresponding PEFT config is directly retrieved from the `peft_config` attribute of the BaseTuner class. Args: model (`nn.Module`): The model to be tuned. adapter_name (`str`): The adapter name. """ peft_config = self.peft_config[adapter_name] # Note: If possible, all checks should be performed *at the start of this method*. # This way, we can raise early if something goes wrong, without leaving the model # in a bad (half-initialized) state. 
self._check_new_adapter_config(peft_config) is_target_modules_in_base_model = False key_list = [key for key, _ in model.named_modules()] model_config = getattr(model, "config", {"model_type": "custom"}) if hasattr(model_config, "to_dict"): model_config = model_config.to_dict() peft_config = self._prepare_adapter_config(peft_config, model_config) for key in key_list: if not self._check_target_module_exists(peft_config, key): continue is_target_modules_in_base_model = True parent, target, target_name = _get_submodules(model, key) optionnal_kwargs = { "loaded_in_8bit": getattr(model, "is_loaded_in_8bit", False), "loaded_in_4bit": getattr(model, "is_loaded_in_4bit", False), "current_key": key, } self._create_and_replace(peft_config, adapter_name, target, target_name, parent, **optionnal_kwargs) if not is_target_modules_in_base_model: raise ValueError( f"Target modules {peft_config.target_modules} not found in the base model. " f"Please check the target modules and try again." ) self._mark_only_adapters_as_trainable() if self.peft_config[adapter_name].inference_mode: for n, p in self.model.named_parameters(): if adapter_name in n: p.requires_grad = False def merge_adapter(self): """ This method merges the LoRa layers into the base model. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.merge() def unmerge_adapter(self): """ This method unmerges the LoRa layers from the base model. """ for module in self.model.modules(): if isinstance(module, BaseTunerLayer): module.unmerge() class BaseTunerLayer(ABC): r""" A tuner layer mixin that provides the common methods and attributes for all tuners. Args: is_plugable (`bool`, *optional*): Whether the adapter layer can be plugged to any pytorch module active_adapter (`str`, *optional*): The name of the active adapter. """ active_adapter = None def merge(self): raise NotImplementedError def unmerge(self): raise NotImplementedError
0
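The tuner machinery above is normally exercised through get_peft_model rather than by instantiating a BaseTuner directly. A sketch, assuming a peft version that supports custom (non-transformers) modules as this file's {"model_type": "custom"} fallback suggests; the MLP and its module names are made up for illustration.

```py
import torch
from peft import LoraConfig, get_peft_model


# Toy, non-transformers model; the module names below are hypothetical.
class MLP(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.lin0 = torch.nn.Linear(10, 20)
        self.relu = torch.nn.ReLU()
        self.lin1 = torch.nn.Linear(20, 2)

    def forward(self, x):
        return self.lin1(self.relu(self.lin0(x)))


# LoraModel (a BaseTuner subclass) walks model.named_modules(), keeps only the keys
# matching target_modules (_check_target_module_exists), swaps them in place
# (_create_and_replace), and freezes everything else (_mark_only_adapters_as_trainable).
config = LoraConfig(target_modules=["lin0"], r=4, lora_alpha=8)
peft_model = get_peft_model(MLP(), config)
peft_model.print_trainable_parameters()
```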
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/__init__.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# from .config import PeftConfig, PeftType, PromptLearningConfig, TaskType
from .peft_types import PeftType, TaskType
from .other import (
    TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
    TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING,
    TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING,
    COMMON_LAYERS_PATTERN,
    CONFIG_NAME,
    WEIGHTS_NAME,
    SAFETENSORS_WEIGHTS_NAME,
    CLAMP_QUANTILE,
    _set_trainable,
    add_library_to_model_card,
    bloom_model_postprocess_past_key_value,
    prepare_model_for_int8_training,
    prepare_model_for_kbit_training,
    shift_tokens_right,
    transpose,
    _get_batch_size,
    _get_submodules,
    _set_adapter,
    _freeze_adapter,
    ModulesToSaveWrapper,
    _prepare_prompt_learning_config,
    _is_valid_match,
    infer_device,
    get_auto_gptq_quant_linear,
    get_quantization_config,
)
from .hub_utils import hub_file_exists
from .save_and_load import get_peft_model_state_dict, set_peft_model_state_dict, load_peft_weights
0
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/hub_utils.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from huggingface_hub import get_hf_file_metadata, hf_hub_url
from huggingface_hub.utils import EntryNotFoundError


def hub_file_exists(repo_id: str, filename: str, revision: str = None, repo_type: str = None) -> bool:
    r"""
    Checks if a file exists in a remote Hub repository.
    """
    url = hf_hub_url(repo_id=repo_id, filename=filename, repo_type=repo_type, revision=revision)
    try:
        get_hf_file_metadata(url)
        return True
    except EntryNotFoundError:
        return False
0
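A short usage sketch for hub_file_exists, not part of the original source. The adapter repo id is borrowed from the test files further down; any public Hub repo works the same way, and a missing file simply yields False via the EntryNotFoundError branch.

```py
from peft.utils import hub_file_exists

repo_id = "peft-internal-testing/tiny-OPTForCausalLM-lora"
print(hub_file_exists(repo_id, "adapter_config.json"))        # True if the file is on the Hub
print(hub_file_exists(repo_id, "adapter_model.safetensors"))  # False when only adapter_model.bin is stored
```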
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/other.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import os import warnings from typing import Optional import accelerate import torch from accelerate.hooks import add_hook_to_module, remove_hook_from_module from accelerate.utils import is_npu_available, is_xpu_available from ..import_utils import is_auto_gptq_available # Get current device name based on available devices def infer_device(): if torch.cuda.is_available(): torch_device = "cuda" elif is_xpu_available(): torch_device = "xpu" elif is_npu_available(): torch_device = "npu" else: torch_device = "cpu" return torch_device # Add or edit model card to have `library_name: peft` def add_library_to_model_card(output_dir): if os.path.exists(os.path.join(output_dir, "README.md")): with open(os.path.join(output_dir, "README.md"), "r") as f: lines = f.readlines() # check if the first line is `---` if len(lines) > 0 and lines[0].startswith("---"): for i, line in enumerate(lines[1:]): # check if line starts with `library_name`, if yes, update it if line.startswith("library_name"): lines[i + 1] = "library_name: peft\n" break elif line.startswith("---"): # insert `library_name: peft` before the last `---` lines.insert(i + 1, "library_name: peft\n") break else: lines = ["---\n", "library_name: peft\n", "---\n"] + lines else: lines = ["---\n", "library_name: peft\n", "---\n"] # write the lines back to README.md with open(os.path.join(output_dir, "README.md"), "w") as f: f.writelines(lines) # needed for prefix-tuning of bloom model def bloom_model_postprocess_past_key_value(past_key_values): past_key_values = torch.cat(past_key_values) total_layers, batch_size, num_attention_heads, num_virtual_tokens, head_dim = past_key_values.shape keys = past_key_values[: total_layers // 2] keys = keys.transpose(2, 3).reshape( total_layers // 2, batch_size * num_attention_heads, head_dim, num_virtual_tokens ) values = past_key_values[total_layers // 2 :] values = values.reshape(total_layers // 2, batch_size * num_attention_heads, num_virtual_tokens, head_dim) return tuple(zip(keys, values)) def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True): r""" This method wraps the entire protocol for preparing a model before running a training. 
This includes: 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm head to fp32 Args: model, (`transformers.PreTrainedModel`): The loaded model from `transformers` """ loaded_in_kbit = getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) is_gptq_quantized = getattr(model, "quantization_method", None) == "gptq" for name, param in model.named_parameters(): # freeze base model's layers param.requires_grad = False if not is_gptq_quantized: # cast all non INT8 parameters to fp32 for param in model.parameters(): if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16): param.data = param.data.to(torch.float32) if (loaded_in_kbit or is_gptq_quantized) and use_gradient_checkpointing: # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) # enable gradient checkpointing for memory efficiency model.gradient_checkpointing_enable() return model # For backward compatibility def prepare_model_for_int8_training(*args, **kwargs): warnings.warn( "prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.", FutureWarning, ) return prepare_model_for_kbit_training(*args, **kwargs) # copied from transformers.models.bart.modeling_bart def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids pad_token_id (`int`): The id of the `padding` token. decoder_start_token_id (`int`): The id of the `start` token. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids class ModulesToSaveWrapper(torch.nn.Module): def __init__(self, module_to_save, adapter_name): super().__init__() self.original_module = module_to_save self.modules_to_save = torch.nn.ModuleDict({}) self.update(adapter_name) self.active_adapter = adapter_name self.disable_adapters = False def update(self, adapter_name): self.modules_to_save.update(torch.nn.ModuleDict({adapter_name: copy.deepcopy(self.original_module)})) if hasattr(self.modules_to_save[adapter_name], "_hf_hook"): old_hook = self.modules_to_save[adapter_name]._hf_hook new_hook = self._create_new_hook(old_hook) remove_hook_from_module(self.modules_to_save[adapter_name]) add_hook_to_module(self.modules_to_save[adapter_name], new_hook) def _create_new_hook(self, old_hook): r""" Creates a new hook based on the old hook. Use it only if you know what you are doing ! 
""" old_hook_cls = getattr(accelerate.hooks, old_hook.__class__.__name__) old_hook_attr = old_hook.__dict__ filtered_old_hook_attr = {} old_hook_init_signature = inspect.signature(old_hook_cls.__init__) for k in old_hook_attr.keys(): if k in old_hook_init_signature.parameters: filtered_old_hook_attr[k] = old_hook_attr[k] new_hook = old_hook_cls(**filtered_old_hook_attr) return new_hook def forward(self, *args, **kwargs): if self.disable_adapters or (self.active_adapter not in self.modules_to_save): return self.original_module(*args, **kwargs) return self.modules_to_save[self.active_adapter](*args, **kwargs) def _get_submodules(model, key): parent = model.get_submodule(".".join(key.split(".")[:-1])) target_name = key.split(".")[-1] target = model.get_submodule(key) return parent, target, target_name def _freeze_adapter(model, adapter_name): for n, p in model.named_parameters(): if adapter_name in n: p.requires_grad = False def _set_trainable(model, adapter_name): key_list = [key for key, _ in model.named_modules()] for key in key_list: target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save) if target_module_found: parent, target, target_name = _get_submodules(model, key) if isinstance(target, ModulesToSaveWrapper): target.update(adapter_name) else: for param in target.parameters(): param.requires_grad = True setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name)) def _set_adapter(model, adapter_name): for module in model.modules(): if isinstance(module, ModulesToSaveWrapper): module.active_adapter = adapter_name def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", peft_config.token_dim) return peft_config def fsdp_auto_wrap_policy(model): import functools import os from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.wrap import _or_policy, lambda_auto_wrap_policy, transformer_auto_wrap_policy from ..tuners import PrefixEncoder, PromptEmbedding, PromptEncoder def lambda_policy_fn(module): if ( len(list(module.named_children())) == 0 and getattr(module, "weight", None) is not None and module.weight.requires_grad ): return True return False lambda_policy = 
functools.partial(lambda_auto_wrap_policy, lambda_fn=lambda_policy_fn) transformer_wrap_policy = functools.partial( transformer_auto_wrap_policy, transformer_layer_cls=( PrefixEncoder, PromptEncoder, PromptEmbedding, FullyShardedDataParallelPlugin.get_module_class_from_name( model, os.environ.get("FSDP_TRANSFORMER_CLS_TO_WRAP", "") ), ), ) auto_wrap_policy = functools.partial(_or_policy, policies=[lambda_policy, transformer_wrap_policy]) return auto_wrap_policy def transpose(weight, fan_in_fan_out): return weight.T if fan_in_fan_out else weight def _is_valid_match(key: str, target_key: str): """ Helper function to match module names target_key and key. Makes sure that either the key is exactly the target_key or the target_key is a submodule of key """ if key.endswith(target_key): if len(key) > len(target_key): return key.endswith("." + target_key) # must be a sub module return True return False def _get_batch_size(input_ids: Optional[torch.Tensor], inputs_embeds: Optional[torch.Tensor]) -> int: """Get the batch size based on either input_ids or input_embeds Raises an ValueError if both are None. """ if (input_ids is None) and (inputs_embeds is None): raise ValueError("You have to provide either input_ids or inputs_embeds") if input_ids is not None: batch_size = input_ids.shape[0] else: batch_size = inputs_embeds.shape[0] return batch_size def get_quantization_config(model: torch.nn.Module, method: str): """ Get the quantization config of the related quantization method """ if ( hasattr(model, "config") and hasattr(model.config, "quantization_config") and (getattr(model, "quantization_method", None) == method) ): return model.config.quantization_config return None def get_auto_gptq_quant_linear(gptq_quantization_config): """ Get the right AutoGPTQQuantLinear class based on the quantization config file """ if is_auto_gptq_available(): from auto_gptq.utils.import_utils import dynamically_import_QuantLinear if gptq_quantization_config is not None: desc_act = gptq_quantization_config.desc_act group_size = gptq_quantization_config.group_size bits = gptq_quantization_config.bits disable_exllama = gptq_quantization_config.disable_exllama AutoGPTQQuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=desc_act, group_size=group_size, bits=bits, disable_exllama=disable_exllama, ) return AutoGPTQQuantLinear return None TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING = { "t5": ["q", "v"], "mt5": ["q", "v"], "bart": ["q_proj", "v_proj"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "blip-2": ["q", "v", "q_proj", "v_proj"], "opt": ["q_proj", "v_proj"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "value"], "xlm-roberta": ["query", "value"], "electra": ["query", "value"], "deberta-v2": ["query_proj", "value_proj"], "deberta": ["in_proj"], "layoutlm": ["query", "value"], "llama": ["q_proj", "v_proj"], "chatglm": ["query_key_value"], "gpt_bigcode": ["c_attn"], "mpt": ["Wqkv"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], "btlm": ["c_proj", "c_attn"], "codegen": ["qkv_proj"], } TRANSFORMERS_MODELS_TO_IA3_TARGET_MODULES_MAPPING = { "t5": ["k", "v", "wo"], "mt5": ["k", "v", "wi_1"], "gpt2": ["c_attn", "mlp.c_proj"], "bloom": ["query_key_value", "mlp.dense_4h_to_h"], "roberta": ["key", "value", "output.dense"], "opt": ["q_proj", "k_proj", "fc2"], "gptj": ["q_proj", "v_proj", "fc_out"], "gpt_neox": ["query_key_value", 
"dense_4h_to_h"], "gpt_neo": ["q_proj", "v_proj", "c_proj"], "bart": ["q_proj", "v_proj", "fc2"], "gpt_bigcode": ["c_attn", "mlp.c_proj"], "llama": ["k_proj", "v_proj", "down_proj"], "bert": ["key", "value", "output.dense"], "deberta-v2": ["key_proj", "value_proj", "output.dense"], "deberta": ["in_proj", "output.dense"], "RefinedWebModel": ["query_key_value"], "RefinedWeb": ["query_key_value"], "falcon": ["query_key_value"], } TRANSFORMERS_MODELS_TO_IA3_FEEDFORWARD_MODULES_MAPPING = { "t5": ["wo"], "mt5": [], "gpt2": ["mlp.c_proj"], "bloom": ["mlp.dense_4h_to_h"], "roberta": ["output.dense"], "opt": ["fc2"], "gptj": ["fc_out"], "gpt_neox": ["dense_4h_to_h"], "gpt_neo": ["c_proj"], "bart": ["fc2"], "gpt_bigcode": ["mlp.c_proj"], "llama": ["down_proj"], "bert": ["output.dense"], "deberta-v2": ["output.dense"], "deberta": ["output.dense"], "RefinedWeb": ["query_key_value"], "RefinedWebModel": ["query_key_value"], "falcon": ["query_key_value"], } COMMON_LAYERS_PATTERN = ["layers", "h", "block", "blocks", "layer"] TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING = { "t5": ["q", "k", "v", "o", "wi", "wo"], "mt5": ["q", "k", "v", "o", "wi_0", "wi_1", "wo"], "bart": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gpt2": ["c_attn"], "bloom": ["query_key_value"], "opt": ["q_proj", "k_proj", "v_proj", "out_proj", "fc1", "fc2"], "gptj": ["q_proj", "v_proj"], "gpt_neox": ["query_key_value"], "gpt_neo": ["q_proj", "v_proj"], "llama": ["q_proj", "v_proj"], "bert": ["query", "value"], "roberta": ["query", "key", "value", "dense"], # "xlm-roberta": ["query", "value"], # "electra": ["query", "value"], "deberta-v2": ["query_proj", "key_proj", "value_proj", "dense"], "gpt_bigcode": ["c_attn"], "deberta": ["in_proj"], # "layoutlm": ["query", "value"], } TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = { "bloom": bloom_model_postprocess_past_key_value, } WEIGHTS_NAME = "adapter_model.bin" SAFETENSORS_WEIGHTS_NAME = "adapter_model.safetensors" CONFIG_NAME = "adapter_config.json" CLAMP_QUANTILE = 0.99
0
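Two of the helpers in the file above are easy to sanity-check in isolation; the expected values below are worked through by hand from the code shown, not part of the original source.

```py
import torch
from peft.utils.other import _is_valid_match, shift_tokens_right

# shift_tokens_right: prepend the decoder start token, drop the last label,
# then replace any remaining -100 with the pad token id.
labels = torch.tensor([[5, 6, -100]])
print(shift_tokens_right(labels, pad_token_id=0, decoder_start_token_id=2))
# tensor([[2, 5, 6]])

# _is_valid_match: a target key only matches at a module boundary.
print(_is_valid_match("model.layers.0.self_attn.q_proj", "q_proj"))      # True
print(_is_valid_match("model.layers.0.self_attn.new_q_proj", "q_proj"))  # False
```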
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/peft_types.py
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all

# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum


class PeftType(str, enum.Enum):
    PROMPT_TUNING = "PROMPT_TUNING"
    P_TUNING = "P_TUNING"
    PREFIX_TUNING = "PREFIX_TUNING"
    LORA = "LORA"
    ADALORA = "ADALORA"
    ADAPTION_PROMPT = "ADAPTION_PROMPT"
    IA3 = "IA3"


class TaskType(str, enum.Enum):
    SEQ_CLS = "SEQ_CLS"
    SEQ_2_SEQ_LM = "SEQ_2_SEQ_LM"
    CAUSAL_LM = "CAUSAL_LM"
    TOKEN_CLS = "TOKEN_CLS"
    QUESTION_ANS = "QUESTION_ANS"
    FEATURE_EXTRACTION = "FEATURE_EXTRACTION"
0
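Because both enums subclass str, members compare equal to their plain string values, which is why configs throughout this repository can pass either form. A quick illustration, not part of the original source:

```py
from peft.utils import PeftType, TaskType

assert PeftType.LORA == "LORA"
assert PeftType("PREFIX_TUNING") is PeftType.PREFIX_TUNING
assert TaskType.SEQ_2_SEQ_LM == "SEQ_2_SEQ_LM"
print(list(PeftType))  # all adapter types supported by this snapshot
```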
hf_public_repos/peft/src/peft
hf_public_repos/peft/src/peft/utils/save_and_load.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from typing import Optional import torch from huggingface_hub import hf_hub_download from huggingface_hub.utils import EntryNotFoundError from safetensors.torch import load_file as safe_load_file from .hub_utils import hub_file_exists from .other import SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, infer_device from .peft_types import PeftType def get_peft_model_state_dict(model, state_dict=None, adapter_name="default"): """ Get the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP, the model should be the underlying model/unwrapped model (i.e. model.module). state_dict (`dict`, *optional*, defaults to `None`): The state dict of the model. If not provided, the state dict of the model will be used. """ config = model.peft_config[adapter_name] if state_dict is None: state_dict = model.state_dict() if config.peft_type in (PeftType.LORA, PeftType.ADALORA): # to_return = lora_state_dict(model, bias=model.peft_config.bias) # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py` # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP bias = config.bias if bias == "none": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k} elif bias == "all": to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k} elif bias == "lora_only": to_return = {} for k in state_dict: if "lora_" in k: to_return[k] = state_dict[k] bias_name = k.split("lora_")[0] + "bias" if bias_name in state_dict: to_return[bias_name] = state_dict[bias_name] else: raise NotImplementedError to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))} if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()} config.rank_pattern = rank_pattern to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name) elif config.peft_type == PeftType.ADAPTION_PROMPT: to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")} elif config.is_prompt_learning: to_return = {} if config.inference_mode: prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight else: prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name) to_return["prompt_embeddings"] = prompt_embeddings elif config.peft_type == PeftType.IA3: to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k} else: raise NotImplementedError if getattr(model, "modules_to_save", None) is not None: for key, value in state_dict.items(): if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save): to_return[key.replace("modules_to_save.", "")] = value to_return = {k.replace(f".{adapter_name}", ""): v for k, v 
in to_return.items()} return to_return def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name="default"): """ Set the state dict of the Peft model. Args: model ([`PeftModel`]): The Peft model. peft_model_state_dict (`dict`): The state dict of the Peft model. """ config = model.peft_config[adapter_name] state_dict = {} if getattr(model, "modules_to_save", None) is not None: for key, value in peft_model_state_dict.items(): if any(module_name in key for module_name in model.modules_to_save): for module_name in model.modules_to_save: if module_name in key: key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}") break state_dict[key] = value else: state_dict = peft_model_state_dict if config.peft_type in (PeftType.LORA, PeftType.ADALORA, PeftType.IA3): peft_model_state_dict = {} parameter_prefix = "ia3_" if config.peft_type == PeftType.IA3 else "lora_" for k, v in state_dict.items(): if parameter_prefix in k: suffix = k.split(parameter_prefix)[1] if "." in suffix: suffix_to_replace = ".".join(suffix.split(".")[1:]) k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}") else: k = f"{k}.{adapter_name}" peft_model_state_dict[k] = v else: peft_model_state_dict[k] = v if config.peft_type == PeftType.ADALORA: rank_pattern = config.rank_pattern if rank_pattern is not None: model.resize_modules_by_rank_pattern(rank_pattern, adapter_name) elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT: peft_model_state_dict = state_dict else: raise NotImplementedError load_result = model.load_state_dict(peft_model_state_dict, strict=False) if config.is_prompt_learning: model.prompt_encoder[adapter_name].embedding.load_state_dict( {"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True ) return load_result def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict: r""" A helper method to load the PEFT weights from the HuggingFace Hub or locally Args: model_id (`str`): The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub. device (`str`): The device to load the weights onto. hf_hub_download_kwargs (`dict`): Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub. """ path = ( os.path.join(model_id, hf_hub_download_kwargs["subfolder"]) if hf_hub_download_kwargs.get("subfolder", None) is not None else model_id ) if device is None: device = infer_device() if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)): filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME) use_safetensors = True elif os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) use_safetensors = False else: has_remote_safetensors_file = hub_file_exists( model_id, SAFETENSORS_WEIGHTS_NAME, revision=hf_hub_download_kwargs.get("revision", None), repo_type=hf_hub_download_kwargs.get("repo_type", None), ) use_safetensors = has_remote_safetensors_file if has_remote_safetensors_file: # Priority 1: load safetensors weights filename = hf_hub_download( model_id, SAFETENSORS_WEIGHTS_NAME, **hf_hub_download_kwargs, ) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs) except EntryNotFoundError: raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}." 
) if use_safetensors: adapters_weights = safe_load_file(filename, device=device) else: adapters_weights = torch.load(filename, map_location=torch.device(device)) return adapters_weights
0
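A hedged sketch of load_peft_weights pulling adapter weights from the Hub, not part of the original source; it assumes network access and reuses the tiny test adapter repo referenced in the test files below.

```py
from peft.utils import load_peft_weights

adapter_weights = load_peft_weights("peft-internal-testing/tiny-OPTForCausalLM-lora", device="cpu")
for name, tensor in adapter_weights.items():
    print(name, tuple(tensor.shape))
```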
hf_public_repos/peft
hf_public_repos/peft/tests/test_adaption_prompt.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import os import tempfile import unittest from unittest import TestCase import torch from torch.testing import assert_close from peft.mapping import get_peft_model from peft.peft_model import PeftModel from peft.tuners.adaption_prompt import AdaptionPromptConfig from peft.utils.other import prepare_model_for_int8_training from peft.utils.save_and_load import get_peft_model_state_dict from tests.testing_common import PeftCommonTester def is_llama_available() -> bool: """Check if Llama is available in the transformers library (it's not in earlier versions).""" try: return importlib.util.find_spec("transformers.models.llama.modeling_llama") is not None except ModuleNotFoundError: return False if is_llama_available(): # We guard the import statement so that our unit tests will pass in CI environments # that don't have a transformers package with Llama. from transformers import LlamaConfig, LlamaForCausalLM, LlamaModel class AdaptionPromptTester(TestCase, PeftCommonTester): """ Tests for the AdaptionPrompt model. Some of these tests were adapted from `test_peft_model.py` (which has been refactored since), but since we haven't checked in the test checkpoints for Llama into `hf-internal-testing`, we separate them for now. """ def setUp(self): """Check that llama is available in transformers package before running each test.""" if not is_llama_available(): self.skipTest("Llama not available in transformers. 
Skipping test.") @staticmethod def _create_test_llama_config(): """Create a test config for a small Llama model for testing.""" return LlamaConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, use_cache=False, ) def test_attributes(self) -> None: model = LlamaModel(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4) model = get_peft_model(model, config) self.assertTrue(hasattr(model, "save_pretrained")) self.assertTrue(hasattr(model, "from_pretrained")) self.assertTrue(hasattr(model, "push_to_hub")) def test_prepare_for_training(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) self.assertTrue(not dummy_output.requires_grad) def test_prepare_for_int8_training(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) model = prepare_model_for_int8_training(model) model = model.to(self.torch_device) for param in model.parameters(): self.assertTrue(not param.requires_grad) config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) dummy_output = model.get_input_embeddings()(dummy_input) self.assertTrue(dummy_output.requires_grad) def test_save_pretrained(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys()) # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). 
self.assertEqual(len(list(state_dict.keys())), 4) # check if tensors equal for key in state_dict.keys(): self.assertTrue( torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) ) # check if `adapter_model.bin` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))) # check if `adapter_config.json` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))) # check if `pytorch_model.bin` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin"))) # check if `config.json` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json"))) def test_save_pretrained_selected_adapters(self) -> None: seed = 420 torch.manual_seed(seed) model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) torch.manual_seed(seed) model_from_pretrained = LlamaForCausalLM(self._create_test_llama_config()) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) model_from_pretrained.load_adapter(tmp_dirname, "new_adapter") # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys()) # Check that the number of saved parameters is 4 -- 2 layers of (tokens and gate). self.assertEqual(len(list(state_dict.keys())), 4) # check if tensors equal for key in state_dict.keys(): self.assertTrue( torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) ) # check if `adapter_model.bin` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))) # check if `adapter_config.json` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))) # check if `pytorch_model.bin` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin"))) # check if `config.json` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json"))) def test_generate(self) -> None: model = LlamaForCausalLM(self._create_test_llama_config()) config = AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) with self.assertRaises(TypeError): # check if `generate` raises an error if no positional arguments are passed _ = model.generate(input_ids, attention_mask=attention_mask) def test_sequence_adapter_ops(self) -> None: """Test sequence of adapter operations.""" # Test input data. 
input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original llama model. original = LlamaForCausalLM(self._create_test_llama_config()) original = original.to(self.torch_device) original_before = original(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) default_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) # Test zero-init: The logits should be exactly the same. assert_close(original_before.logits, default_before.logits, rtol=0, atol=0) # Single fine-tuning step on "default" adapter. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() default_before.loss.backward() optimizer.step() # Test that the output changed. default_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) self.assertFalse(torch.allclose(default_before.logits, default_after.logits)) with adapted.disable_adapter(): # Test that the output is the same as the original ouput. default_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, default_disabled.logits, rtol=0, atol=0) # Add new adapter 1. adapted.add_adapter("adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM")) # Test zero-init adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) self.assertFalse(torch.allclose(adapter_1_before.logits, adapter_1_after.logits)) self.assertFalse(torch.allclose(original_before.logits, adapter_1_after.logits)) self.assertFalse(torch.allclose(default_after.logits, adapter_1_after.logits)) with adapted.disable_adapter(): # Test that the output is the same as the original output. adapter_1_disabled = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_disabled.logits, rtol=0, atol=0) # Set adapter back to default. adapted.set_adapter("default") # Test that the output is the same as the default output after training. default_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(default_after.logits, default_after_set.logits, rtol=0, atol=0) self.assertFalse(torch.allclose(original_before.logits, default_after_set.logits)) self.assertFalse(torch.allclose(adapter_1_after.logits, default_after_set.logits)) def test_add_and_set_while_disabled(self): """Test that adding and setting adapters while disabled works as intended.""" # Test input data. input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) target_ids = torch.LongTensor([[0, 0, 0], [0, 0, 0]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # Create original llama model. 
original = LlamaForCausalLM(self._create_test_llama_config()) original = original.to(self.torch_device) original_before = original(input_ids=input_ids, attention_mask=attention_mask) # Get AdaptionPrompt model. adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) with adapted.disable_adapter(): adapted.add_adapter( "adapter 1", AdaptionPromptConfig(adapter_layers=3, adapter_len=8, task_type="CAUSAL_LM") ) # Test that the output is the same as the original output. adapter_1_before = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(original_before.logits, adapter_1_before.logits, rtol=0, atol=0) # Single fine-tuning step on adapter 1. optimizer = torch.optim.SGD(adapted.parameters(), lr=1) optimizer.zero_grad() adapter_1_before.loss.backward() optimizer.step() # Test that adapter 1 output changed. adapter_1_after = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) self.assertFalse(torch.allclose(original_before.logits, adapter_1_after.logits)) adapted.set_adapter("default") with adapted.disable_adapter(): adapted.set_adapter("adapter 1") # Test that adapter 1 is active again. adapter_1_after_set = adapted(input_ids=input_ids, attention_mask=attention_mask, labels=target_ids) assert_close(adapter_1_after.logits, adapter_1_after_set.logits, rtol=0, atol=0) def test_use_cache(self) -> None: """Test that AdaptionPrompt works when Llama config use_cache=True.""" input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) original = LlamaForCausalLM( LlamaConfig( vocab_size=16, hidden_size=8, intermediate_size=8, num_hidden_layers=8, num_attention_heads=4, use_cache=False, ) ) adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) expected = adapted.generate(input_ids=input_ids, max_length=8) # Set use_cache = True and generate output again. adapted.base_model.config.use_cache = True actual = adapted.generate(input_ids=input_ids, max_length=8) assert_close(expected, actual, rtol=0, atol=0) def test_bf16_inference(self) -> None: """Test that AdaptionPrompt works when Llama using a half-precision model.""" input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) original = LlamaForCausalLM.from_pretrained( "trl-internal-testing/tiny-random-LlamaForCausalLM", torch_dtype=torch.bfloat16 ) adapted = get_peft_model( original, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM") ) adapted = adapted.to(self.torch_device) _ = adapted.generate(input_ids=input_ids) @unittest.expectedFailure def test_disable_adapter(self): llama_config = self._create_test_llama_config() model = LlamaForCausalLM(llama_config).to(self.torch_device) dummy_input = torch.LongTensor([[1, 1, 1]]).to(self.torch_device) output_before = model(dummy_input).logits config = AdaptionPromptConfig(adapter_layers=1, adapter_len=4, task_type="CAUSAL_LM") model = get_peft_model(model, config).to(self.torch_device) output_peft = model(dummy_input).logits # TODO currently this fails because scores are zeroed out: # https://github.com/huggingface/peft/blob/062d95a09eb5d1de35c0e5e23d4387daba99e2db/src/peft/tuners/adaption_prompt.py#L303 # This is fine for users but makes it difficult to test if anything happens. In the future, we will have a clean # way to control initialization. Until then, this test is expected to fail. 
self.assertFalse(torch.allclose(output_before, output_peft)) with model.disable_adapter(): output_peft_disabled = model(dummy_input).logits self.assertTrue(torch.allclose(output_before, output_peft_disabled))
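# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the multi-adapter AdaptionPrompt workflow exercised by the tests above,
# shown outside the unittest harness. It assumes network access to the tiny
# random Llama checkpoint that the tests themselves use; the adapter name
# "extra" and the hyperparameters are arbitrary example values, not a
# recommended configuration.
# ---------------------------------------------------------------------------
def _demo_adaption_prompt_multi_adapter():
    import torch
    from transformers import LlamaForCausalLM

    from peft import AdaptionPromptConfig, get_peft_model

    base = LlamaForCausalLM.from_pretrained("trl-internal-testing/tiny-random-LlamaForCausalLM")
    # Wrapping the base model registers a first adapter named "default".
    model = get_peft_model(
        base, AdaptionPromptConfig(adapter_layers=2, adapter_len=4, task_type="CAUSAL_LM")
    )
    # A second adapter can be added and activated by name.
    model.add_adapter("extra", AdaptionPromptConfig(adapter_layers=2, adapter_len=8, task_type="CAUSAL_LM"))
    model.set_adapter("extra")
    input_ids = torch.LongTensor([[1, 1, 1]])
    _ = model(input_ids=input_ids)
    # Inside this context the adapter is bypassed and the plain base model runs.
    with model.disable_adapter():
        _ = model(input_ids=input_ids)
    # Switch back to the originally registered adapter.
    model.set_adapter("default")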
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_auto.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import torch from peft import ( AutoPeftModel, AutoPeftModelForCausalLM, AutoPeftModelForFeatureExtraction, AutoPeftModelForQuestionAnswering, AutoPeftModelForSeq2SeqLM, AutoPeftModelForSequenceClassification, AutoPeftModelForTokenClassification, PeftModel, PeftModelForCausalLM, PeftModelForFeatureExtraction, PeftModelForQuestionAnswering, PeftModelForSeq2SeqLM, PeftModelForSequenceClassification, PeftModelForTokenClassification, ) class PeftAutoModelTester(unittest.TestCase): def test_peft_causal_lm(self): model_id = "peft-internal-testing/tiny-OPTForCausalLM-lora" model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForCausalLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForCausalLM)) # check if kwargs are passed correctly model = AutoPeftModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForCausalLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForCausalLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_seq2seq_lm(self): model_id = "peft-internal-testing/tiny_T5ForSeq2SeqLM-lora" model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) # check if kwargs are passed correctly model = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSeq2SeqLM)) self.assertTrue(model.base_model.lm_head.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSeq2SeqLM.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16) def test_peft_sequence_cls(self): model_id = "peft-internal-testing/tiny_OPTForSequenceClassification-lora" model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForSequenceClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) # check if kwargs are passed correctly model = AutoPeftModelForSequenceClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForSequenceClassification)) 
self.assertTrue(model.score.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForSequenceClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_token_classification(self): model_id = "peft-internal-testing/tiny_GPT2ForTokenClassification-lora" model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForTokenClassification.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) # check if kwargs are passed correctly model = AutoPeftModelForTokenClassification.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForTokenClassification)) self.assertTrue(model.base_model.classifier.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForTokenClassification.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_question_answering(self): model_id = "peft-internal-testing/tiny_OPTForQuestionAnswering-lora" model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) # check if kwargs are passed correctly model = AutoPeftModelForQuestionAnswering.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForQuestionAnswering)) self.assertTrue(model.base_model.qa_outputs.original_module.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForQuestionAnswering.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_feature_extraction(self): model_id = "peft-internal-testing/tiny_OPTForFeatureExtraction-lora" model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) # check if kwargs are passed correctly model = AutoPeftModelForFeatureExtraction.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModelForFeatureExtraction)) self.assertTrue(model.base_model.model.decoder.embed_tokens.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModelForFeatureExtraction.from_pretrained( model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16 ) def test_peft_whisper(self): model_id = "peft-internal-testing/tiny_WhisperForConditionalGeneration-lora" model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model = AutoPeftModel.from_pretrained(model_id) self.assertTrue(isinstance(model, PeftModel)) # check if kwargs are passed correctly model = 
AutoPeftModel.from_pretrained(model_id, torch_dtype=torch.bfloat16) self.assertTrue(isinstance(model, PeftModel)) self.assertTrue(model.base_model.model.model.encoder.embed_positions.weight.dtype == torch.bfloat16) adapter_name = "default" is_trainable = False # This should work _ = AutoPeftModel.from_pretrained(model_id, adapter_name, is_trainable, torch_dtype=torch.bfloat16)
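# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the AutoPeftModel entry point checked above. It assumes network access to
# the tiny LoRA checkpoint used by the tests; extra keyword arguments such as
# torch_dtype are forwarded to the underlying base-model from_pretrained call.
# ---------------------------------------------------------------------------
def _demo_auto_peft_model():
    import tempfile

    import torch

    from peft import AutoPeftModelForCausalLM

    # One call resolves the adapter config, loads the matching base model and
    # attaches the adapter weights on top of it.
    model = AutoPeftModelForCausalLM.from_pretrained(
        "peft-internal-testing/tiny-OPTForCausalLM-lora", torch_dtype=torch.bfloat16
    )
    # The resulting PeftModel can be saved and reloaded like any other adapter.
    with tempfile.TemporaryDirectory() as tmp_dir:
        model.save_pretrained(tmp_dir)
        model = AutoPeftModelForCausalLM.from_pretrained(tmp_dir)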
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_common_gpu.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import pytest import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoTokenizer, BitsAndBytesConfig, LlamaForCausalLM, WhisperForConditionalGeneration, ) from peft import AdaptionPromptConfig, LoraConfig, PeftModel, get_peft_model, prepare_model_for_kbit_training from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .testing_utils import require_bitsandbytes, require_torch_gpu, require_torch_multi_gpu if is_bnb_available(): from peft.tuners.lora import Linear8bitLt if is_bnb_4bit_available(): from peft.tuners.lora import Linear4bit @require_torch_gpu class PeftGPUCommonTests(unittest.TestCase): r""" A common tester to run common operations that are performed on GPU such as generation, loading in 8bit, etc. """ def setUp(self): self.seq2seq_model_id = "google/flan-t5-base" self.causal_lm_model_id = "facebook/opt-350m" self.audio_model_id = "openai/whisper-large" if torch.cuda.is_available(): self.device = torch.device("cuda:0") def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() @require_bitsandbytes @pytest.mark.multi_gpu_tests @pytest.mark.single_gpu_tests def test_lora_bnb_8bit_quantization(self): r""" Test that tests if the 8bit quantization using LoRA works as expected """ whisper_8bit = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, device_map="auto", load_in_8bit=True, ) opt_8bit = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", load_in_8bit=True, ) flan_8bit = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, device_map="auto", load_in_8bit=True, ) flan_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) opt_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") flan_8bit = get_peft_model(flan_8bit, flan_lora_config) self.assertTrue(isinstance(flan_8bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, Linear8bitLt)) opt_8bit = get_peft_model(opt_8bit, opt_lora_config) self.assertTrue(isinstance(opt_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, Linear8bitLt)) whisper_8bit = get_peft_model(whisper_8bit, config) self.assertTrue( isinstance(whisper_8bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, Linear8bitLt) ) @require_bitsandbytes @pytest.mark.multi_gpu_tests @pytest.mark.single_gpu_tests def test_lora_bnb_4bit_quantization_from_pretrained_safetensors(self): r""" Test that tests if the 4bit quantization using LoRA 
works as expected with safetensors weights. """ model_id = "facebook/opt-350m" peft_model_id = "ybelkada/test-st-lora" model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True) model = PeftModel.from_pretrained(model, peft_model_id) _ = model.generate(input_ids=torch.LongTensor([[0, 2, 3, 1]]).to(0)) @require_bitsandbytes @pytest.mark.multi_gpu_tests @pytest.mark.single_gpu_tests def test_lora_bnb_4bit_quantization(self): r""" Test that tests if the 4bit quantization using LoRA works as expected """ whisper_4bit = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, device_map="auto", load_in_4bit=True, ) opt_4bit = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, device_map="auto", load_in_4bit=True, ) flan_4bit = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, device_map="auto", load_in_4bit=True, ) flan_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) opt_lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) config = LoraConfig(r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none") flan_4bit = get_peft_model(flan_4bit, flan_lora_config) self.assertTrue(isinstance(flan_4bit.base_model.model.encoder.block[0].layer[0].SelfAttention.q, Linear4bit)) opt_4bit = get_peft_model(opt_4bit, opt_lora_config) self.assertTrue(isinstance(opt_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, Linear4bit)) whisper_4bit = get_peft_model(whisper_4bit, config) self.assertTrue(isinstance(whisper_4bit.base_model.model.model.decoder.layers[0].self_attn.v_proj, Linear4bit)) @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_lora_causal_lm_mutli_gpu_inference(self): r""" Test if LORA can be used for inference on multiple GPUs. """ lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = AutoModelForCausalLM.from_pretrained(self.causal_lm_model_id, device_map="balanced") tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) self.assertEqual(set(model.hf_device_map.values()), {0, 1}) model = get_peft_model(model, lora_config) self.assertTrue(isinstance(model, PeftModel)) dummy_input = "This is a dummy input:" input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device) # this should work without any problem _ = model.generate(input_ids=input_ids) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_lora_seq2seq_lm_mutli_gpu_inference(self): r""" Test if LORA can be used for inference on multiple GPUs - 8bit version. 
""" lora_config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="SEQ_2_SEQ_LM" ) model = AutoModelForSeq2SeqLM.from_pretrained(self.seq2seq_model_id, device_map="balanced", load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) self.assertEqual(set(model.hf_device_map.values()), {0, 1}) model = get_peft_model(model, lora_config) self.assertTrue(isinstance(model, PeftModel)) self.assertTrue(isinstance(model.base_model.model.encoder.block[0].layer[0].SelfAttention.q, Linear8bitLt)) dummy_input = "This is a dummy input:" input_ids = tokenizer(dummy_input, return_tensors="pt").input_ids.to(self.device) # this should work without any problem _ = model.generate(input_ids=input_ids) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_adaption_prompt_8bit(self): model = LlamaForCausalLM.from_pretrained( "HuggingFaceM4/tiny-random-LlamaForCausalLM", load_in_8bit=True, torch_dtype=torch.float16, device_map="auto", ) model = prepare_model_for_kbit_training(model) config = AdaptionPromptConfig( adapter_len=10, adapter_layers=2, task_type="CAUSAL_LM", ) model = get_peft_model(model, config) random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0) _ = model(random_input) @require_torch_multi_gpu @pytest.mark.multi_gpu_tests @require_bitsandbytes def test_adaption_prompt_4bit(self): model = LlamaForCausalLM.from_pretrained( "HuggingFaceM4/tiny-random-LlamaForCausalLM", load_in_4bit=True, torch_dtype=torch.float16, device_map="auto", ) model = prepare_model_for_kbit_training(model) config = AdaptionPromptConfig( adapter_len=10, adapter_layers=2, task_type="CAUSAL_LM", ) model = get_peft_model(model, config) random_input = torch.LongTensor([[1, 0, 1, 0, 1, 0]]).to(0) _ = model(random_input) @require_torch_gpu @pytest.mark.single_gpu_tests @require_bitsandbytes def test_print_4bit_expected(self): EXPECTED_TRAINABLE_PARAMS = 294912 EXPECTED_ALL_PARAMS = 125534208 model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", load_in_4bit=True, ) config = LoraConfig( r=8, ) model = get_peft_model(model, config) trainable_params, all_params = model.get_nb_trainable_parameters() self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS) self.assertEqual(all_params, EXPECTED_ALL_PARAMS) # test with double quant bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, ) model = AutoModelForCausalLM.from_pretrained( "facebook/opt-125m", quantization_config=bnb_config, ) config = LoraConfig( r=8, ) model = get_peft_model(model, config) trainable_params, all_params = model.get_nb_trainable_parameters() self.assertEqual(trainable_params, EXPECTED_TRAINABLE_PARAMS) self.assertEqual(all_params, EXPECTED_ALL_PARAMS) @require_torch_gpu @pytest.mark.single_gpu_tests @require_bitsandbytes def test_modules_to_save_grad(self): model_id = "bigscience/bloomz-560m" load_in_4bit = True model = AutoModelForSequenceClassification.from_pretrained( model_id, load_in_4bit=load_in_4bit, torch_dtype=torch.float32, ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=16, lora_dropout=0.05, bias="none", task_type="SEQ_CLS", ) peft_model = get_peft_model(model, config) lm_head = peft_model.base_model.model.score original_module = lm_head.original_module modules_to_save = lm_head.modules_to_save.default inputs = torch.randn((1024)) o1 = lm_head(inputs) o1.mean().backward() self.assertTrue(modules_to_save.weight.requires_grad is True) 
        self.assertTrue(original_module.weight.grad is None)
        self.assertTrue(modules_to_save.weight.grad is not None)
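# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the int8 + LoRA recipe exercised above. It needs a CUDA device and
# bitsandbytes installed; the model id and target modules are the ones the
# tests use for OPT, and the LoRA hyperparameters are example values only.
# ---------------------------------------------------------------------------
def _demo_lora_on_8bit_model():
    from transformers import AutoModelForCausalLM

    from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

    model = AutoModelForCausalLM.from_pretrained(
        "facebook/opt-350m", device_map="auto", load_in_8bit=True
    )
    # Freezes the quantized base weights and prepares the model (e.g. norm
    # layer casting) for k-bit training.
    model = prepare_model_for_kbit_training(model)
    config = LoraConfig(
        r=16,
        lora_alpha=32,
        target_modules=["q_proj", "v_proj"],
        lora_dropout=0.05,
        bias="none",
        task_type="CAUSAL_LM",
    )
    model = get_peft_model(model, config)
    # Only the LoRA matrices are trainable; the 8-bit base weights stay frozen.
    model.print_trainable_parameters()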
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_config.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import pickle import tempfile import unittest import warnings import pytest from peft import ( AdaptionPromptConfig, IA3Config, LoraConfig, PeftConfig, PrefixTuningConfig, PromptEncoder, PromptEncoderConfig, PromptTuningConfig, ) PEFT_MODELS_TO_TEST = [("lewtun/tiny-random-OPTForCausalLM-delta", "v1")] class PeftConfigTestMixin: all_config_classes = ( LoraConfig, PromptEncoderConfig, PrefixTuningConfig, PromptTuningConfig, AdaptionPromptConfig, IA3Config, ) class PeftConfigTester(unittest.TestCase, PeftConfigTestMixin): def test_methods(self): r""" Test if all configs have the expected methods. Here we test - to_dict - save_pretrained - from_pretrained - from_json_file """ # test if all configs have the expected methods for config_class in self.all_config_classes: config = config_class() self.assertTrue(hasattr(config, "to_dict")) self.assertTrue(hasattr(config, "save_pretrained")) self.assertTrue(hasattr(config, "from_pretrained")) self.assertTrue(hasattr(config, "from_json_file")) def test_task_type(self): for config_class in self.all_config_classes: # assert this will not fail _ = config_class(task_type="test") def test_from_pretrained(self): r""" Test if the config is correctly loaded using: - from_pretrained """ for config_class in self.all_config_classes: for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta _ = config_class.from_pretrained(model_name, revision=revision) def test_save_pretrained(self): r""" Test if the config is correctly saved and loaded using - save_pretrained """ for config_class in self.all_config_classes: config = config_class() with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) self.assertEqual(config.to_dict(), config_from_pretrained.to_dict()) def test_from_json_file(self): for config_class in self.all_config_classes: config = config_class() with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_json = config_class.from_json_file(os.path.join(tmp_dirname, "adapter_config.json")) self.assertEqual(config.to_dict(), config_from_json) def test_to_dict(self): r""" Test if the config can be correctly converted to a dict using: - to_dict """ for config_class in self.all_config_classes: config = config_class() self.assertTrue(isinstance(config.to_dict(), dict)) def test_from_pretrained_cache_dir(self): r""" Test if the config is correctly loaded with extra kwargs """ with tempfile.TemporaryDirectory() as tmp_dirname: for config_class in self.all_config_classes: for model_name, revision in PEFT_MODELS_TO_TEST: # Test we can load config from delta _ = config_class.from_pretrained(model_name, revision=revision, cache_dir=tmp_dirname) def test_from_pretrained_cache_dir_remote(self): r""" Test if the config is correctly loaded with a checkpoint from the hub """ with 
tempfile.TemporaryDirectory() as tmp_dirname: _ = PeftConfig.from_pretrained("ybelkada/test-st-lora", cache_dir=tmp_dirname) self.assertTrue("models--ybelkada--test-st-lora" in os.listdir(tmp_dirname)) def test_set_attributes(self): # manually set attributes and check if they are correctly written for config_class in self.all_config_classes: config = config_class(peft_type="test") # save pretrained with tempfile.TemporaryDirectory() as tmp_dirname: config.save_pretrained(tmp_dirname) config_from_pretrained = config_class.from_pretrained(tmp_dirname) self.assertEqual(config.to_dict(), config_from_pretrained.to_dict()) def test_config_copy(self): # see https://github.com/huggingface/peft/issues/424 for config_class in self.all_config_classes: config = config_class() copied = copy.copy(config) self.assertEqual(config.to_dict(), copied.to_dict()) def test_config_deepcopy(self): # see https://github.com/huggingface/peft/issues/424 for config_class in self.all_config_classes: config = config_class() copied = copy.deepcopy(config) self.assertEqual(config.to_dict(), copied.to_dict()) def test_config_pickle_roundtrip(self): # see https://github.com/huggingface/peft/issues/424 for config_class in self.all_config_classes: config = config_class() copied = pickle.loads(pickle.dumps(config)) self.assertEqual(config.to_dict(), copied.to_dict()) def test_prompt_encoder_warning_num_layers(self): # This test checks that if a prompt encoder config is created with an argument that is ignored, there should be # warning. However, there should be no warning if the default value is used. kwargs = { "num_virtual_tokens": 20, "num_transformer_submodules": 1, "token_dim": 768, "encoder_hidden_size": 768, } # there should be no warning with just default argument for encoder_num_layer config = PromptEncoderConfig(**kwargs) with warnings.catch_warnings(): PromptEncoder(config) # when changing encoder_num_layer, there should be a warning for MLP since that value is not used config = PromptEncoderConfig(encoder_num_layers=123, **kwargs) with pytest.warns(UserWarning) as record: PromptEncoder(config) expected_msg = "for MLP, the argument `encoder_num_layers` is ignored. Exactly 2 MLP layers are used." assert str(record.list[0].message) == expected_msg
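# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the config round-trip behaviour covered above. Everything runs locally;
# only the field values are arbitrary examples.
# ---------------------------------------------------------------------------
def _demo_config_roundtrip():
    import os
    import tempfile

    from peft import LoraConfig

    config = LoraConfig(r=8, lora_alpha=16, target_modules=["q_proj", "v_proj"])
    with tempfile.TemporaryDirectory() as tmp_dir:
        # save_pretrained writes adapter_config.json into the directory.
        config.save_pretrained(tmp_dir)
        assert os.path.exists(os.path.join(tmp_dir, "adapter_config.json"))
        # from_pretrained restores an equivalent config object.
        reloaded = LoraConfig.from_pretrained(tmp_dir)
        assert config.to_dict() == reloaded.to_dict()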
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_custom_models.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import torch from parameterized import parameterized from torch import nn from transformers.pytorch_utils import Conv1D from peft import LoraConfig, get_peft_model from .testing_common import PeftCommonTester # MLP is a vanilla FF network with only linear layers # EmbConv1D has an embedding and a Conv1D layer # Conv2D has a Conv2D layer TEST_CASES = [ ("Vanilla MLP 1", "MLP", LoraConfig, {"target_modules": "lin0"}), ("Vanilla MLP 2", "MLP", LoraConfig, {"target_modules": ["lin0"]}), ("Vanilla MLP 3", "MLP", LoraConfig, {"target_modules": ["lin1"]}), ("Vanilla MLP 4", "MLP", LoraConfig, {"target_modules": ["lin0", "lin1"]}), ("Vanilla MLP 5", "MLP", LoraConfig, {"target_modules": ["lin0"], "modules_to_save": ["lin1"]}), ( "Vanilla MLP 6", "MLP", LoraConfig, { "target_modules": ["lin0"], "lora_alpha": 4, "lora_dropout": 0.1, }, ), ("Embedding + transformers Conv1D 1", "EmbConv1D", LoraConfig, {"target_modules": ["conv1d"]}), ("Embedding + transformers Conv1D 2", "EmbConv1D", LoraConfig, {"target_modules": ["emb"]}), ("Embedding + transformers Conv1D 3", "EmbConv1D", LoraConfig, {"target_modules": ["emb", "conv1d"]}), ("Conv2d 1", "Conv2d", LoraConfig, {"target_modules": ["conv2d"]}), ("Conv2d 2", "Conv2d", LoraConfig, {"target_modules": ["conv2d", "lin0"]}), ] class MLP(nn.Module): def __init__(self): super().__init__() self.lin0 = nn.Linear(10, 20) self.relu = nn.ReLU() self.drop = nn.Dropout(0.5) self.lin1 = nn.Linear(20, 2) self.sm = nn.LogSoftmax(dim=-1) def forward(self, X): X = X.float() X = self.lin0(X) X = self.relu(X) X = self.drop(X) X = self.lin1(X) X = self.sm(X) return X class ModelEmbConv1D(nn.Module): def __init__(self): super().__init__() self.emb = nn.Embedding(100, 5) self.conv1d = Conv1D(1, 5) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) def forward(self, X): X = self.emb(X) X = self.conv1d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) return X class ModelConv2D(nn.Module): def __init__(self): super().__init__() self.conv2d = nn.Conv2d(5, 10, 3) self.relu = nn.ReLU() self.flat = nn.Flatten() self.lin0 = nn.Linear(10, 2) def forward(self, X): X = X.float().reshape(2, 5, 3, 3) X = self.conv2d(X) X = self.relu(X) X = self.flat(X) X = self.lin0(X) return X class MockTransformerWrapper: """Mock class to behave like a transformers model. This is needed because the tests initialize the model by calling transformers_class.from_pretrained. 
""" @classmethod def from_pretrained(cls, model_id): # set the seed so that from_pretrained always returns the same model torch.manual_seed(0) if model_id == "MLP": return MLP() if model_id == "EmbConv1D": return ModelEmbConv1D() if model_id == "Conv2d": return ModelConv2D() raise ValueError(f"model_id {model_id} not implemented") class PeftCustomModelTester(unittest.TestCase, PeftCommonTester): """TODO""" transformers_class = MockTransformerWrapper def prepare_inputs_for_testing(self): X = torch.arange(90).view(9, 10).to(self.torch_device) return {"X": X} @parameterized.expand(TEST_CASES) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): # This test does not work with custom models because it assumes that # there is always a method get_input_embeddings that returns a layer # which does not need updates. Instead, a new test is added below that # checks that LoRA works as expected. pass @parameterized.expand(TEST_CASES) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): # for embeddings, even with init_lora_weights=False, the LoRA embeddings weights are still initialized to # perform the identity transform, thus the test would fail. if config_kwargs["target_modules"] == ["emb"]: return config_kwargs = config_kwargs.copy() config_kwargs["init_lora_weights"] = False self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_generate(self, test_name, model_id, config_cls, config_kwargs): # Custom models do not (necessarily) have a generate method, so this test is not performed pass @parameterized.expand(TEST_CASES) def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs): # Custom models do not (necessarily) have a generate method, so this test is not performed pass @parameterized.expand(TEST_CASES) def test_training_customs(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_training_customs_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): # At the moment, layer indexing only works when layer names conform to a specific pattern, which is not # guaranteed here. Therefore, this test is not performed. 
pass @parameterized.expand(TEST_CASES) def test_training_customs_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(TEST_CASES) def test_only_params_are_updated(self, test_name, model_id, config_cls, config_kwargs): # An explicit test that when using LoRA on a custom model, only the LoRA parameters are updated during training X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model_before = copy.deepcopy(model) model.train() optimizer = torch.optim.SGD(model.parameters(), lr=0.5) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.sum() loss.backward() optimizer.step() tol = 1e-4 params_before = dict(model_before.named_parameters()) params_after = dict(model.named_parameters()) self.assertEqual(params_before.keys(), params_after.keys()) for name, param_before in params_before.items(): param_after = params_after[name] if ("lora_" in name) or ("modules_to_save" in name): # target_modules and modules_to_save _are_ updated self.assertFalse(torch.allclose(param_before, param_after, atol=tol, rtol=tol)) else: self.assertTrue(torch.allclose(param_before, param_after, atol=tol, rtol=tol)) @parameterized.expand(TEST_CASES) def test_disable_adapters(self, test_name, model_id, config_cls, config_kwargs): X = self.prepare_inputs_for_testing() model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model.eval() outputs_before = model(**X) model.train() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) # train at least 3 steps for all parameters to be updated (probably this is required because of symmetry # breaking of some LoRA layers that are initialized with constants) for _ in range(3): optimizer.zero_grad() y_pred = model(**X) loss = y_pred.sum() loss.backward() optimizer.step() model.eval() outputs_after = model(**X) with model.disable_adapter(): outputs_disabled = model(**X) # check that after leaving the disable_adapter context, everything is enabled again outputs_enabled_after_disable = model(**X) self.assertFalse(torch.allclose(outputs_before, outputs_after)) self.assertTrue(torch.allclose(outputs_before, outputs_disabled)) self.assertTrue(torch.allclose(outputs_after, outputs_enabled_after_disable)) @parameterized.expand(TEST_CASES) def test_disable_adapter_with_bias_warns(self, test_name, model_id, config_cls, config_kwargs): # When training biases in lora, disabling adapters does not reset the biases, so the output is not what users # might expect. Therefore, a warning should be given. # Note: We test only with custom models since they run really fast. 
There is really no point in testing the same # thing with decoder, encoder_decoder, etc. def run_with_disable(config_kwargs, bias): config_kwargs = config_kwargs.copy() config_kwargs["bias"] = bias model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) with peft_model.disable_adapter(): pass # there is nothing to be done # check that bias=all and bias=lora_only give a warning with the correct message msg_start = "Careful, disabling adapter layers with bias configured to be" with self.assertWarns(UserWarning, msg=msg_start): run_with_disable(config_kwargs, bias="lora_only") with self.assertWarns(UserWarning, msg=msg_start): run_with_disable(config_kwargs, bias="all") # For bias=none, there is no warning. Unfortunately, AFAIK unittest has no option to assert that no warning is # given, therefore, we check that the unittest gives us an AssertionError if we check for a warning bias_warning_was_given = False try: with self.assertWarns(UserWarning) as cm: run_with_disable(config_kwargs, bias="none") # if we get here, it means there was no AssertionError, i.e. there are warnings -- let's check that they # are not related to the bias setting if any(warning.message.args[0].startswith(msg_start) for warning in cm.warnings): bias_warning_was_given = True except AssertionError: # This is good, there was an AssertionError, i.e. there was no warning pass if bias_warning_was_given: # This is bad, there was a warning about the bias when there should not have been any. self.fail("There should be no warning when bias is set to 'none'") @parameterized.expand(TEST_CASES) def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
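# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of applying LoRA to a plain torch module, mirroring the MLP cases above.
# The module and layer names are arbitrary; note that no task_type is set, so
# get_peft_model returns a generic PeftModel wrapper.
# ---------------------------------------------------------------------------
def _demo_lora_on_custom_module():
    import torch
    from torch import nn

    from peft import LoraConfig, get_peft_model

    class DemoMLP(nn.Module):
        def __init__(self):
            super().__init__()
            self.lin0 = nn.Linear(10, 20)
            self.relu = nn.ReLU()
            self.lin1 = nn.Linear(20, 2)

        def forward(self, X):
            return self.lin1(self.relu(self.lin0(X.float())))

    # Target layers are selected by submodule name, just like for transformers models.
    config = LoraConfig(target_modules=["lin0"], modules_to_save=["lin1"])
    model = get_peft_model(DemoMLP(), config)
    model.print_trainable_parameters()
    X = torch.arange(90).view(9, 10)
    _ = model(X)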
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_decoder_models.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from parameterized import parameterized from transformers import AutoModelForCausalLM from peft import AdaLoraConfig from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_DECODER_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-OPTForCausalLM", "hf-internal-testing/tiny-random-GPTNeoXForCausalLM", "hf-internal-testing/tiny-random-GPT2LMHeadModel", "hf-internal-testing/tiny-random-BloomForCausalLM", "hf-internal-testing/tiny-random-gpt_neo", "hf-internal-testing/tiny-random-GPTJForCausalLM", "hf-internal-testing/tiny-random-GPTBigCodeForCausalLM", "HuggingFaceM4/tiny-random-LlamaForCausalLM", ] FULL_GRID = { "model_ids": PEFT_DECODER_MODELS_TO_TEST, "task_type": "CAUSAL_LM", } def skip_non_pt_mqa(test_list): r""" Skip tests that are prefix tuning for MQA models (not supported yet) """ return [test for test in test_list if not ("prefix_tuning" in test[0] and "GPTBigCodeForCausalLM" in test[0])] def skip_adalora_and_gpt2(test_list): return [test for test in test_list if not (("GPT2LMHeadModel" in test[1]) and (test[2] == AdaLoraConfig))] class PeftDecoderModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parametrized.expand for debugging purposes to test each model individually. 
""" transformers_class = AutoModelForCausalLM def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "CAUSAL_LM", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_pt_mqa)) def test_generate(self, test_name, model_id, config_cls, config_kwargs): self._test_generate(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_pt_mqa)) def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs): self._test_generate_half_prec(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_pt_mqa)) def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs): self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_decoders(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): 
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "task_type": "CAUSAL_LM", }, filter_params_func=skip_adalora_and_gpt2, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "CAUSAL_LM", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_pt_mqa)) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "task_type": "CAUSAL_LM", }, filter_params_func=skip_non_pt_mqa, ) ) def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_disable_adapter(model_id, config_cls, config_kwargs) def test_generate_adalora_no_dropout(self): # test for issue #730 model_id = "hf-internal-testing/tiny-random-OPTForCausalLM" config_kwargs = { "target_modules": None, "task_type": "CAUSAL_LM", "lora_dropout": 0.0, } self._test_generate(model_id, AdaLoraConfig, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_pt_mqa)) def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
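# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the decoder-model path tested above, using one of the tiny test
# checkpoints. It assumes network access; the target modules are the OPT
# attention projections and the generation settings are arbitrary.
# ---------------------------------------------------------------------------
def _demo_lora_decoder_generate():
    import torch
    from transformers import AutoModelForCausalLM

    from peft import LoraConfig, get_peft_model

    base = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
    config = LoraConfig(r=8, target_modules=["q_proj", "v_proj"], task_type="CAUSAL_LM")
    model = get_peft_model(base, config)
    input_ids = torch.LongTensor([[1, 1, 1], [1, 2, 1]])
    attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]])
    # PeftModelForCausalLM forwards generate() to the wrapped base model.
    _ = model.generate(input_ids=input_ids, attention_mask=attention_mask, max_length=8)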
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_encoder_decoder_models.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from parameterized import parameterized from transformers import AutoModelForSeq2SeqLM from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_ENCODER_DECODER_MODELS_TO_TEST = [ "ybelkada/tiny-random-T5ForConditionalGeneration-calibrated", "hf-internal-testing/tiny-random-BartForConditionalGeneration", ] FULL_GRID = {"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "task_type": "SEQ_2_SEQ_LM"} class PeftEncoderDecoderModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parametrized.expand for debugging purposes to test each model individually. """ transformers_class = AutoModelForSeq2SeqLM def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, 
config_kwargs) # skip non lora models - generate does not work for prefix tuning, prompt tuning @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_generate(self, test_name, model_id, config_cls, config_kwargs): self._test_generate(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs): self._test_generate_half_prec(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs): self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_encoder_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs): self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "SEQ_2_SEQ_LM", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) 
    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
                "lora_kwargs": {"init_lora_weights": [False]},
                "adalora_kwargs": {"init_lora_weights": [False]},
                "ia3_kwargs": {"init_ia3_weights": [False]},
                "task_type": "SEQ_2_SEQ_LM",
            },
        )
    )
    def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
        self._test_disable_adapter(model_id, config_cls, config_kwargs)
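# ---------------------------------------------------------------------------
# Illustrative appendix (not part of the original test file): a minimal sketch
# of the seq2seq path tested above. It assumes network access to the tiny T5
# test checkpoint; "q" / "v" are the T5 attention projection names used
# throughout these tests.
# ---------------------------------------------------------------------------
def _demo_lora_seq2seq_generate():
    import torch
    from transformers import AutoModelForSeq2SeqLM

    from peft import LoraConfig, get_peft_model

    base = AutoModelForSeq2SeqLM.from_pretrained("ybelkada/tiny-random-T5ForConditionalGeneration-calibrated")
    config = LoraConfig(r=8, target_modules=["q", "v"], task_type="SEQ_2_SEQ_LM")
    model = get_peft_model(base, config)
    input_ids = torch.LongTensor([[1, 1, 1], [1, 2, 1]])
    # For encoder-decoder models, generate() only needs the encoder inputs.
    _ = model.generate(input_ids=input_ids, max_length=8)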
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_feature_extraction_models.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from parameterized import parameterized from transformers import AutoModel from peft import PrefixTuningConfig, PromptLearningConfig from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-BertModel", "hf-internal-testing/tiny-random-RobertaModel", "hf-internal-testing/tiny-random-DebertaModel", "hf-internal-testing/tiny-random-DebertaV2Model", ] FULL_GRID = { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "task_type": "FEATURE_EXTRACTION", } def skip_non_prompt_tuning(test_list): """Skip tests that are not prompt tuning""" return [ test for test in test_list if issubclass(test[2], PromptLearningConfig) and (test[2] != PrefixTuningConfig) ] def skip_deberta_lora_tests(test_list): r""" Skip tests that are checkpointing with lora/ia3 tests for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not (any(k in test[0] for k in ["lora", "ia3"]) and "Deberta" in test[0])] def skip_deberta_pt_tests(test_list): r""" Skip tests that are checkpointing with lora/ia3 tests for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not ("prefix_tuning" in test[0] and "Deberta" in test[0])] class PeftFeatureExtractionModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parametrized.expand for debugging purposes to test each model individually. 
""" transformers_class = AutoModel def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_pt_tests) ) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_lora_tests) ) def test_training_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): 
self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_prompt_tuning) ) def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
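
# --- Illustrative sketch (not part of the test suite) ---
# The parameterized tests above all reduce to the same pattern: wrap a tiny feature
# extraction model with a PEFT adapter and run a forward pass. The snippet below is a
# minimal, hedged version of that pattern; the LoRA hyperparameters are assumptions
# chosen to mirror CONFIG_TESTING_KWARGS in testing_common.py, not values this suite
# prescribes.
if __name__ == "__main__":
    from peft import LoraConfig, get_peft_model

    base_model = AutoModel.from_pretrained("hf-internal-testing/tiny-random-BertModel")
    lora_config = LoraConfig(
        task_type="FEATURE_EXTRACTION", r=8, lora_alpha=32, lora_dropout=0.05, bias="none"
    )
    peft_model = get_peft_model(base_model, lora_config)
    peft_model.print_trainable_parameters()

    # same dummy batch as prepare_inputs_for_testing above
    input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]])
    attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]])
    hidden_states = peft_model(input_ids=input_ids, attention_mask=attention_mask)[0]
    print(hidden_states.shape)  # (batch_size, sequence_length, hidden_size)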
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_gpu_examples.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import os import tempfile import unittest from dataclasses import dataclass from typing import Any, Dict, List, Union import pytest import torch from datasets import Audio, DatasetDict, load_dataset from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForLanguageModeling, Seq2SeqTrainer, Seq2SeqTrainingArguments, Trainer, TrainingArguments, WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperProcessor, WhisperTokenizer, ) from peft import ( AdaLoraConfig, LoraConfig, get_peft_model, prepare_model_for_int8_training, prepare_model_for_kbit_training, ) from .testing_utils import ( require_auto_gptq, require_bitsandbytes, require_optimum, require_torch_gpu, require_torch_multi_gpu, ) # A full testing suite that tests all the necessary features on GPU. The tests should # rely on the example scripts to test the features. @dataclass class DataCollatorSpeechSeq2SeqWithPadding: r""" Directly copied from: https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ processor: Any def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need different padding methods # first treat the audio inputs by simply returning torch tensors input_features = [{"input_features": feature["input_features"]} for feature in features] batch = self.processor.feature_extractor.pad(input_features, return_tensors="pt") # get the tokenized label sequences label_features = [{"input_ids": feature["labels"]} for feature in features] # pad the labels to max length labels_batch = self.processor.tokenizer.pad(label_features, return_tensors="pt") # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if (labels[:, 0] == self.processor.tokenizer.bos_token_id).all().cpu().item(): labels = labels[:, 1:] batch["labels"] = labels return batch @require_torch_gpu @require_bitsandbytes class PeftBnbGPUExampleTests(unittest.TestCase): r""" A single GPU int8 + fp4 test suite, this will test if training fits correctly on a single GPU device (1x NVIDIA T4 16GB) using bitsandbytes. 
The tests are the following: - Seq2Seq model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_flan_t5_large_bnb_peft.ipynb - Causal LM model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb - Audio model training based on: https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ def setUp(self): self.seq2seq_model_id = "google/flan-t5-base" self.causal_lm_model_id = "facebook/opt-6.7b" self.audio_model_id = "openai/whisper-large" def tearDown(self): r""" Efficient mechanism to free GPU memory after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() gc.collect() @pytest.mark.single_gpu_tests def test_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, load_in_8bit=True, device_map="auto", ) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_int8_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.single_gpu_tests @require_torch_gpu def test_4bit_adalora_causalLM(self): r""" Tests the 4bit training with adalora """ model_id = "facebook/opt-350m" model = AutoModelForCausalLM.from_pretrained(model_id, load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained(model_id) model.gradient_checkpointing_enable() model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = 
False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_mutli_gpu(self): r""" Test the CausalLM training on a multi-GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `opt-6.7b` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, load_in_8bit=True, device_map="auto", ) self.assertEqual(set(model.hf_device_map.values()), {0, 1}) tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) model = prepare_model_for_int8_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.single_gpu_tests def test_seq2seq_lm_training_single_gpu(self): r""" Test the Seq2SeqLM training on a single GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, load_in_8bit=True, device_map={"": 0}, ) self.assertEqual(set(model.hf_device_map.values()), {0}) tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) model = prepare_model_for_int8_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_seq2seq_lm_training_mutli_gpu(self): r""" Test the Seq2SeqLM training on a multi-GPU device. This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/Finetune_opt_bnb_peft.ipynb where we train `flan-large` on `english_quotes` dataset in few steps. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForSeq2SeqLM.from_pretrained( self.seq2seq_model_id, load_in_8bit=True, device_map="balanced", ) self.assertEqual(set(model.hf_device_map.values()), {0, 1}) tokenizer = AutoTokenizer.from_pretrained(self.seq2seq_model_id) model = prepare_model_for_int8_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q", "v"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.single_gpu_tests def test_audio_model_training(self): r""" Test the audio model training on a single GPU device. 
This test is a converted version of https://github.com/huggingface/peft/blob/main/examples/int8_training/peft_bnb_whisper_large_v2_training.ipynb """ with tempfile.TemporaryDirectory() as tmp_dir: dataset_name = "ybelkada/common_voice_mr_11_0_copy" task = "transcribe" language = "Marathi" common_voice = DatasetDict() common_voice["train"] = load_dataset(dataset_name, split="train+validation") common_voice = common_voice.remove_columns( ["accent", "age", "client_id", "down_votes", "gender", "locale", "path", "segment", "up_votes"] ) feature_extractor = WhisperFeatureExtractor.from_pretrained(self.audio_model_id) tokenizer = WhisperTokenizer.from_pretrained(self.audio_model_id, language=language, task=task) processor = WhisperProcessor.from_pretrained(self.audio_model_id, language=language, task=task) common_voice = common_voice.cast_column("audio", Audio(sampling_rate=16000)) def prepare_dataset(batch): # load and resample audio data from 48 to 16kHz audio = batch["audio"] # compute log-Mel input features from input audio array batch["input_features"] = feature_extractor( audio["array"], sampling_rate=audio["sampling_rate"] ).input_features[0] # encode target text to label ids batch["labels"] = tokenizer(batch["sentence"]).input_ids return batch common_voice = common_voice.map( prepare_dataset, remove_columns=common_voice.column_names["train"], num_proc=2 ) data_collator = DataCollatorSpeechSeq2SeqWithPadding(processor=processor) model = WhisperForConditionalGeneration.from_pretrained( self.audio_model_id, load_in_8bit=True, device_map="auto" ) model.config.forced_decoder_ids = None model.config.suppress_tokens = [] model = prepare_model_for_int8_training(model) # as Whisper model uses Conv layer in encoder, checkpointing disables grad computation # to avoid this, make the inputs trainable def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.model.encoder.conv1.register_forward_hook(make_inputs_require_grad) config = LoraConfig( r=32, lora_alpha=64, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none" ) model = get_peft_model(model, config) model.print_trainable_parameters() training_args = Seq2SeqTrainingArguments( output_dir=tmp_dir, # change to a repo name of your choice per_device_train_batch_size=8, gradient_accumulation_steps=1, # increase by 2x for every 2x decrease in batch size learning_rate=1e-3, warmup_steps=2, max_steps=3, fp16=True, per_device_eval_batch_size=8, generation_max_length=128, logging_steps=25, remove_unused_columns=False, # required as the PeftModel forward doesn't have the signature of the wrapped model's forward label_names=["labels"], # same reason as above ) trainer = Seq2SeqTrainer( args=training_args, model=model, train_dataset=common_voice["train"], data_collator=data_collator, tokenizer=processor.feature_extractor, ) trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @require_torch_gpu @require_auto_gptq @require_optimum class PeftGPTQGPUTests(unittest.TestCase): r""" GPTQ + peft tests """ def setUp(self): from transformers import GPTQConfig self.causal_lm_model_id = "marcsun13/opt-350m-gptq-4bit" self.quantization_config = GPTQConfig(bits=4, disable_exllama=True) self.tokenizer = AutoTokenizer.from_pretrained(self.causal_lm_model_id) def tearDown(self): r""" Efficient mechanism to free GPU memory 
after each test. Based on https://github.com/huggingface/transformers/issues/21094 """ gc.collect() torch.cuda.empty_cache() @pytest.mark.single_gpu_tests def test_causal_lm_training(self): r""" Test the CausalLM training on a single GPU device. The test would simply fail if the adapters are not set correctly. """ with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.single_gpu_tests def test_adalora_causalLM(self): r""" Tests the gptq training with adalora """ model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) model = prepare_model_for_kbit_training(model) peft_config = AdaLoraConfig( init_r=6, target_r=4, tinit=50, tfinal=100, deltaT=5, beta1=0.3, beta2=0.3, orth_reg_weight=0.2, lora_alpha=32, lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, peft_config) data = load_dataset("ybelkada/english_quotes_copy") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) with tempfile.TemporaryDirectory() as tmp_dir: trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"]) @pytest.mark.multi_gpu_tests @require_torch_multi_gpu def test_causal_lm_training_mutli_gpu(self): r""" Test the CausalLM training on a multi-GPU device. The test would simply fail if the adapters are not set correctly. 
""" with tempfile.TemporaryDirectory() as tmp_dir: model = AutoModelForCausalLM.from_pretrained( self.causal_lm_model_id, torch_dtype=torch.float16, device_map="auto", quantization_config=self.quantization_config, ) self.assertEqual(set(model.hf_device_map.values()), {0, 1}) model = prepare_model_for_kbit_training(model) setattr(model, "model_parallel", True) setattr(model, "is_parallelizable", True) config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM", ) model = get_peft_model(model, config) data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: self.tokenizer(samples["quote"]), batched=True) trainer = Trainer( model=model, train_dataset=data["train"], args=TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=2, max_steps=3, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir=tmp_dir, ), data_collator=DataCollatorForLanguageModeling(self.tokenizer, mlm=False), ) model.config.use_cache = False trainer.train() model.cpu().save_pretrained(tmp_dir) self.assertTrue("adapter_config.json" in os.listdir(tmp_dir)) self.assertTrue("adapter_model.bin" in os.listdir(tmp_dir)) # assert loss is not None self.assertIsNotNone(trainer.state.log_history[-1]["train_loss"])
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_hub_features.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest

from transformers import AutoModelForCausalLM

from peft import PeftConfig, PeftModel


PEFT_MODELS_TO_TEST = [("peft-internal-testing/test-lora-subfolder", "test")]


class PeftHubFeaturesTester(unittest.TestCase):
    def test_subfolder(self):
        r"""
        Test if the subfolder argument works as expected
        """
        for model_id, subfolder in PEFT_MODELS_TO_TEST:
            config = PeftConfig.from_pretrained(model_id, subfolder=subfolder)

            model = AutoModelForCausalLM.from_pretrained(
                config.base_model_name_or_path,
            )
            model = PeftModel.from_pretrained(model, model_id, subfolder=subfolder)

            self.assertTrue(isinstance(model, PeftModel))
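
# --- Illustrative usage sketch ---
# The subfolder test above boils down to the load sequence below; it is reproduced here as a
# hedged, stand-alone example using the same internal test repository.
def _example_load_adapter_from_subfolder():
    model_id, subfolder = "peft-internal-testing/test-lora-subfolder", "test"

    # the adapter config stored under <repo>/<subfolder> records which base model to load
    config = PeftConfig.from_pretrained(model_id, subfolder=subfolder)
    base_model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)

    # attach the adapter weights from the same subfolder on top of the base model
    return PeftModel.from_pretrained(base_model, model_id, subfolder=subfolder)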
0
hf_public_repos/peft
hf_public_repos/peft/tests/test_stablediffusion.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from dataclasses import asdict, replace from unittest import TestCase import numpy as np from diffusers import StableDiffusionPipeline from parameterized import parameterized from peft import LoraConfig, get_peft_model from .testing_common import ClassInstantier, PeftCommonTester from .testing_utils import temp_seed PEFT_DIFFUSERS_SD_MODELS_TO_TEST = ["hf-internal-testing/tiny-stable-diffusion-torch"] CONFIG_TESTING_KWARGS = ( { "text_encoder": { "r": 8, "lora_alpha": 32, "target_modules": ["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"], "lora_dropout": 0.0, "bias": "none", }, "unet": { "r": 8, "lora_alpha": 32, "target_modules": ["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"], "lora_dropout": 0.0, "bias": "none", }, }, ) CLASSES_MAPPING = { "lora": (LoraConfig, CONFIG_TESTING_KWARGS[0]), } PeftStableDiffusionTestConfigManager = ClassInstantier(CLASSES_MAPPING) class StableDiffusionModelTester(TestCase, PeftCommonTester): r""" Tests that diffusers StableDiffusion model works with PEFT as expected. """ transformers_class = StableDiffusionPipeline def instantiate_sd_peft(self, model_id, config_cls, config_kwargs): # Instantiate StableDiffusionPipeline model = self.transformers_class.from_pretrained(model_id) config_kwargs = config_kwargs.copy() text_encoder_kwargs = config_kwargs.pop("text_encoder") unet_kwargs = config_kwargs.pop("unet") # the remaining config kwargs should be applied to both configs for key, val in config_kwargs.items(): text_encoder_kwargs[key] = val unet_kwargs[key] = val # Instantiate text_encoder adapter config_text_encoder = config_cls(**text_encoder_kwargs) model.text_encoder = get_peft_model(model.text_encoder, config_text_encoder) # Instantiate unet adapter config_unet = config_cls(**unet_kwargs) model.unet = get_peft_model(model.unet, config_unet) # Move model to device model = model.to(self.torch_device) return model def prepare_inputs_for_testing(self): return { "prompt": "a high quality digital photo of a cute corgi", "num_inference_steps": 20, } @parameterized.expand( PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): # Instantiate model & adapters model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Generate output for peft modified StableDiffusion dummy_input = self.prepare_inputs_for_testing() with temp_seed(seed=42): peft_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Merge adapter and model model.text_encoder = model.text_encoder.merge_and_unload() model.unet = model.unet.merge_and_unload() # Generate output for peft merged StableDiffusion with temp_seed(seed=42): merged_output = np.array(model(**dummy_input).images[0]).astype(np.float32) # Images are in uint8 drange, so use large 
atol self.assertTrue(np.allclose(peft_output, merged_output, atol=1.0)) @parameterized.expand( PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, }, ) ) def test_add_weighted_adapter_base_unchanged(self, test_name, model_id, config_cls, config_kwargs): # Instantiate model & adapters model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) # Get current available adapter config text_encoder_adapter_name = next(iter(model.text_encoder.peft_config.keys())) unet_adapter_name = next(iter(model.unet.peft_config.keys())) text_encoder_adapter_config = replace(model.text_encoder.peft_config[text_encoder_adapter_name]) unet_adapter_config = replace(model.unet.peft_config[unet_adapter_name]) # Create weighted adapters model.text_encoder.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") model.unet.add_weighted_adapter([unet_adapter_name], [0.5], "weighted_adapter_test") # Assert that base adapters config did not change self.assertTrue( asdict(text_encoder_adapter_config) == asdict(model.text_encoder.peft_config[text_encoder_adapter_name]) ) self.assertTrue(asdict(unet_adapter_config) == asdict(model.unet.peft_config[unet_adapter_name])) @parameterized.expand( PeftStableDiffusionTestConfigManager.get_grid_parameters( { "model_ids": PEFT_DIFFUSERS_SD_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, }, ) ) def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_disable_adapter(model_id, config_cls, config_kwargs)
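
# --- Illustrative sketch (not part of the test suite) ---
# instantiate_sd_peft above attaches two independent LoRA adapters to a StableDiffusionPipeline,
# one on the text encoder and one on the UNet, and test_merge_layers folds them back into the
# base weights. The sketch below shows that flow in isolation; the target module lists are the
# ones from CONFIG_TESTING_KWARGS above.
def _example_sd_lora_merge(model_id: str = "hf-internal-testing/tiny-stable-diffusion-torch"):
    pipe = StableDiffusionPipeline.from_pretrained(model_id)

    text_encoder_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=["k_proj", "q_proj", "v_proj", "out_proj", "fc1", "fc2"],
        lora_dropout=0.0,
        bias="none",
    )
    unet_config = LoraConfig(
        r=8,
        lora_alpha=32,
        target_modules=["proj_in", "proj_out", "to_k", "to_q", "to_v", "to_out.0", "ff.net.0.proj", "ff.net.2"],
        lora_dropout=0.0,
        bias="none",
    )

    pipe.text_encoder = get_peft_model(pipe.text_encoder, text_encoder_config)
    pipe.unet = get_peft_model(pipe.unet, unet_config)

    # ... LoRA training would happen here ...

    # merge the adapters into the original weights so the vanilla diffusers pipeline can be used
    pipe.text_encoder = pipe.text_encoder.merge_and_unload()
    pipe.unet = pipe.unet.merge_and_unload()
    return pipe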
0
hf_public_repos/peft
hf_public_repos/peft/tests/testing_common.py
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pickle import tempfile from collections import OrderedDict from dataclasses import replace import torch from diffusers import StableDiffusionPipeline from peft import ( AdaLoraConfig, IA3Config, LoraConfig, PeftModel, PrefixTuningConfig, PromptEncoderConfig, PromptLearningConfig, PromptTuningConfig, get_peft_model, get_peft_model_state_dict, prepare_model_for_int8_training, ) from peft.tuners.lora import LoraLayer from peft.utils import _get_submodules, infer_device CONFIG_CLASSES = ( IA3Config, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig, ) CONFIG_TESTING_KWARGS = ( # IA³ { "target_modules": None, "feedforward_modules": None, }, # LoRA { "r": 8, "lora_alpha": 32, "target_modules": None, "lora_dropout": 0.05, "bias": "none", }, # prefix tuning { "num_virtual_tokens": 10, }, # prompt encoder { "num_virtual_tokens": 10, "encoder_hidden_size": 32, }, # prompt tuning { "num_virtual_tokens": 10, }, # AdaLoRA { "target_modules": None, }, ) CLASSES_MAPPING = { "ia3": (IA3Config, CONFIG_TESTING_KWARGS[0]), "lora": (LoraConfig, CONFIG_TESTING_KWARGS[1]), "prefix_tuning": (PrefixTuningConfig, CONFIG_TESTING_KWARGS[2]), "prompt_encoder": (PromptEncoderConfig, CONFIG_TESTING_KWARGS[3]), "prompt_tuning": (PromptTuningConfig, CONFIG_TESTING_KWARGS[4]), "adalora": (AdaLoraConfig, CONFIG_TESTING_KWARGS[5]), } # Adapted from https://github.com/huggingface/transformers/blob/48327c57182fdade7f7797d1eaad2d166de5c55b/src/transformers/activations.py#LL166C7-L166C22 class ClassInstantier(OrderedDict): def __getitem__(self, key, *args, **kwargs): # check if any of the kwargs is inside the config class kwargs if any(kwarg in self[key][1] for kwarg in kwargs): new_config_kwargs = self[key][1].copy() new_config_kwargs.update(kwargs) return (self[key][0], new_config_kwargs) return super().__getitem__(key, *args, **kwargs) def get_grid_parameters(self, grid_parameters, filter_params_func=None): r""" Returns a list of all possible combinations of the parameters in the config classes. Args: grid_parameters (`dict`): A dictionary containing the parameters to be tested. There should be at least the key "model_ids" which contains a list of model ids to be tested. The other keys should be the name of the config class post-fixed with "_kwargs" and the value should be a dictionary containing the parameters to be tested for that config class. filter_params_func (`callable`, `optional`): A function that takes a list of tuples and returns a list of tuples. This function is used to filter out the tests that needs for example to be skipped. Returns: generated_tests (`list`): A list of tuples containing the name of the test, the model id, the config class and the config class kwargs. 
""" generated_tests = [] model_list = grid_parameters["model_ids"] task_type = grid_parameters["task_type"] if "task_type" in grid_parameters else None for model_id in model_list: for key, value in self.items(): if "{}_kwargs".format(key) in grid_parameters: peft_configs = [] current_peft_config = value[1].copy() for current_key, current_value in grid_parameters[f"{key}_kwargs"].items(): for kwarg in current_value: current_peft_config.update({current_key: kwarg}) if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs.append(current_peft_config.copy()) else: current_peft_config = value[1].copy() if task_type is not None: current_peft_config.update({"task_type": task_type}) peft_configs = [current_peft_config] for peft_config in peft_configs: generated_tests.append((f"test_{model_id}_{key}", model_id, value[0], peft_config)) if filter_params_func is not None: generated_tests = filter_params_func(generated_tests) return generated_tests PeftTestConfigManager = ClassInstantier(CLASSES_MAPPING) class PeftCommonTester: r""" A large testing suite for testing common functionality of the PEFT models. Attributes: torch_device (`torch.device`): The device on which the tests will be run. transformers_class (`transformers.PreTrainedModel`): The transformers class that is being tested. """ torch_device = infer_device() transformers_class = None def prepare_inputs_for_common(self): raise NotImplementedError def _test_model_attr(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) self.assertTrue(hasattr(model, "save_pretrained")) self.assertTrue(hasattr(model, "from_pretrained")) self.assertTrue(hasattr(model, "push_to_hub")) def _test_adapter_name(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter") correctly_converted = False for n, _ in model.named_parameters(): if "test-adapter" in n: correctly_converted = True break self.assertTrue(correctly_converted) def _test_prepare_for_training(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) self.assertTrue(not dummy_output.requires_grad) # load with `prepare_model_for_int8_training` model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) model = prepare_model_for_int8_training(model) for param in model.parameters(): self.assertTrue(not param.requires_grad) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) # For backward compatibility if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() else: def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) dummy_input = self.prepare_inputs_for_testing() dummy_output = model.get_input_embeddings()(dummy_input["input_ids"]) self.assertTrue(dummy_output.requires_grad) def _test_save_pretrained(self, model_id, config_cls, 
config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys()) # check if tensors equal for key in state_dict.keys(): self.assertTrue( torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) ) # check if `adapter_model.bin` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))) # check if `adapter_config.json` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))) # check if `pytorch_model.bin` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin"))) # check if `config.json` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json"))) def _test_save_pretrained_selected_adapters(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) new_adapter_config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model.add_adapter("new_adapter", new_adapter_config) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) model_from_pretrained.load_adapter(tmp_dirname, "new_adapter") # check if the state dicts are equal state_dict = get_peft_model_state_dict(model) state_dict_from_pretrained = get_peft_model_state_dict(model_from_pretrained) # check if same keys self.assertEqual(state_dict.keys(), state_dict_from_pretrained.keys()) # check if tensors equal for key in state_dict.keys(): self.assertTrue( torch.allclose( state_dict[key].to(self.torch_device), state_dict_from_pretrained[key].to(self.torch_device) ) ) # check if `adapter_model.bin` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_model.bin"))) # check if `adapter_config.json` is present self.assertTrue(os.path.exists(os.path.join(tmp_dirname, "adapter_config.json"))) # check if `pytorch_model.bin` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "pytorch_model.bin"))) # check if `config.json` is not present self.assertFalse(os.path.exists(os.path.join(tmp_dirname, "config.json"))) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, selected_adapters=["default"]) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname) self.assertTrue("default" in model_from_pretrained.peft_config.keys()) self.assertTrue("new_adapter" not in 
model_from_pretrained.peft_config.keys()) def _test_from_pretrained_config_construction(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls(base_model_name_or_path=model_id, **config_kwargs) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained( model_from_pretrained, tmp_dirname, is_trainable=False, config=config ) self.assertTrue(model_from_pretrained.peft_config["default"].inference_mode) self.assertIs(model_from_pretrained.peft_config["default"], config) def _test_merge_layers(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, IA3Config): # Merge layers only supported for LoRA and IA³ return if ("gpt2" in model_id.lower()) and (config_cls != LoraConfig): self.skipTest("Merging GPT2 adapters not supported for IA³ (yet)") model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) if config.peft_type not in ("IA3", "LORA"): with self.assertRaises(AttributeError): model = model.merge_and_unload() dummy_input = self.prepare_inputs_for_testing() model.eval() logits_unmerged = model(**dummy_input)[0] model = model.merge_and_unload() logits_merged = model(**dummy_input)[0] self.assertTrue(torch.allclose(logits_unmerged, logits_merged, atol=1e-4, rtol=1e-4)) # For this test to work, init_lora_weights must be False. This ensures that weights are not initialized to # the identity transform. 
transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] self.assertFalse(torch.allclose(logits_merged, logits_transformers, atol=1e-10, rtol=1e-10)) # test that the logits are identical after a save-load-roundtrip if hasattr(model, "save_pretrained"): # model is a transformers model with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(tmp_dirname).to(self.torch_device) else: # model is not a transformers model model_from_pretrained = pickle.loads(pickle.dumps(model)) logits_merged_from_pretrained = model_from_pretrained(**dummy_input)[0] self.assertTrue(torch.allclose(logits_merged, logits_merged_from_pretrained, atol=1e-4, rtol=1e-4)) def _test_generate(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `generate` works _ = model.generate(**inputs) with self.assertRaises(TypeError): # check if `generate` raises an error if no positional arguments are passed _ = model.generate(inputs["input_ids"]) def _test_generate_half_prec(self, model_id, config_cls, config_kwargs): if config_cls not in (IA3Config, LoraConfig, PrefixTuningConfig): return model = self.transformers_class.from_pretrained(model_id, torch_dtype=torch.bfloat16) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) input_ids = torch.LongTensor([[1, 1, 1], [2, 1, 2]]).to(self.torch_device) attention_mask = torch.LongTensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) # check if `generate` works _ = model.generate(input_ids=input_ids, attention_mask=attention_mask) with self.assertRaises(TypeError): # check if `generate` raises an error if no positional arguments are passed _ = model.generate(input_ids, attention_mask=attention_mask) def _test_prefix_tuning_half_prec_conversion(self, model_id, config_cls, config_kwargs): if config_cls not in (PrefixTuningConfig,): return config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.half() self.assertEqual(model.base_model_torch_dtype, torch.float16) def _test_training(self, model_id, config_cls, config_kwargs): if config_cls not in (IA3Config, LoraConfig): return model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() parameter_prefix = "ia3" if config_cls == IA3Config else "lora" for n, param in model.named_parameters(): if (parameter_prefix in n) or ("modules_to_save" in n): self.assertIsNotNone(param.grad) else: self.assertIsNone(param.grad) def _test_inference_safetensors(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = 
model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() # set to eval mode, since things like dropout can affect the output otherwise model.eval() logits = model(**inputs)[0][0] with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname, safe_serialization=True) self.assertTrue("adapter_model.safetensors" in os.listdir(tmp_dirname)) self.assertTrue("adapter_model.bin" not in os.listdir(tmp_dirname)) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4)) def _test_training_layer_indexing(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return config = config_cls( base_model_name_or_path=model_id, layers_to_transform=[0], **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] logits = output[0] loss = output.sum() loss.backward() nb_trainable = 0 for n, param in model.named_parameters(): if "lora" in n: self.assertIsNotNone(param.grad) nb_trainable += 1 else: self.assertIsNone(param.grad) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = self.transformers_class.from_pretrained(model_id) model_from_pretrained = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname).to(self.torch_device) logits_from_pretrained = model_from_pretrained(**inputs)[0][0] self.assertTrue(torch.allclose(logits, logits_from_pretrained, atol=1e-4, rtol=1e-4)) model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) nb_trainable_all = 0 for n, param in model.named_parameters(): if "lora" in n: nb_trainable_all += 1 self.assertLess(nb_trainable, nb_trainable_all) def _test_training_gradient_checkpointing(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig, IA3Config): return model = self.transformers_class.from_pretrained(model_id) if not getattr(model, "supports_gradient_checkpointing", False): return model.gradient_checkpointing_enable() config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() parameter_prefix = "ia3" if config_cls == IA3Config else "lora" for n, param in model.named_parameters(): if parameter_prefix in n: self.assertIsNotNone(param.grad) else: self.assertIsNone(param.grad) def _test_peft_model_device_map(self, model_id, config_cls, config_kwargs): if config_cls not in (LoraConfig,): return config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config) model = model.to(self.torch_device) with tempfile.TemporaryDirectory() as tmp_dirname: model.save_pretrained(tmp_dirname) model_from_pretrained = 
self.transformers_class.from_pretrained(model_id) _ = PeftModel.from_pretrained(model_from_pretrained, tmp_dirname, device_map={"": "cpu"}).to( self.torch_device ) def _test_training_prompt_learning_tasks(self, model_id, config_cls, config_kwargs): if not issubclass(config_cls, PromptLearningConfig): return model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) inputs = self.prepare_inputs_for_testing() # check if `training` works output = model(**inputs)[0] loss = output.sum() loss.backward() # check that prompt encoder has grads for param in model.prompt_encoder.parameters(): self.assertIsNotNone(param.grad) def _test_delete_adapter(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) adapter_to_delete = "delete_me" model = get_peft_model(model, config) model.add_adapter(adapter_to_delete, config) model.set_adapter(adapter_to_delete) model = model.to(self.torch_device) if config.peft_type not in ("LORA"): with self.assertRaises(AttributeError): model.delete_adapter(adapter_to_delete) else: model.delete_adapter(adapter_to_delete) self.assertFalse(adapter_to_delete in model.peft_config) key_list = [key for key, _ in model.named_modules() if "lora" not in key] for key in key_list: _, target, _ = _get_submodules(model, key) if isinstance(target, LoraLayer): for attr in [ "r", "lora_alpha", "scaling", "lora_A", "lora_B", "lora_embedding_A", "lora_embedding_B", "lora_dropout", ]: self.assertFalse(adapter_to_delete in getattr(target, attr)) def _test_unload_adapter(self, model_id, config_cls, config_kwargs): model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config) model = model.to(self.torch_device) if config.peft_type not in ("LORA", "ADALORA"): with self.assertRaises(AttributeError): model = model.unload() else: dummy_input = self.prepare_inputs_for_testing() logits_with_lora = model(**dummy_input)[0] transformers_model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) logits_transformers = transformers_model(**dummy_input)[0] model.eval() model = model.unload() logits_unload = model(**dummy_input)[0] self.assertFalse(torch.allclose(logits_with_lora, logits_unload, atol=1e-10, rtol=1e-10)) self.assertTrue(torch.allclose(logits_transformers, logits_unload, atol=1e-4, rtol=1e-4)) def _test_weighted_combination_of_adapters(self, model_id, config_cls, config_kwargs): if issubclass(config_cls, AdaLoraConfig): # AdaLora does not support adding more than 1 adapter return adapter_list = ["adapter1", "adapter_2", "adapter_3"] weight_list = [0.5, 1.5, 1.5] model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) if not isinstance(config, (LoraConfig)): return model = get_peft_model(model, config, adapter_list[0]) model.add_adapter(adapter_list[1], config) model.add_adapter(adapter_list[2], replace(config, r=20)) model = model.to(self.torch_device) # test re-weighting single adapter model.add_weighted_adapter([adapter_list[0]], [weight_list[0]], "single_adapter_reweighting") # test svd re-weighting with multiple adapters 
model.add_weighted_adapter(adapter_list[1:], weight_list[1:], "multi_adapter_svd_reweighting") # test linear re-weighting with multiple adapters model.add_weighted_adapter( adapter_list[:2], weight_list[:2], "multi_adapter_linear_reweighting", combination_type="linear" ) with self.assertRaises(ValueError): model.add_weighted_adapter( adapter_list[1:], weight_list[1:], "multi_adapter_linear_reweighting_uneven_r", combination_type="linear", ) new_adapters = [ "single_adapter_reweighting", "multi_adapter_svd_reweighting", "multi_adapter_linear_reweighting", ] for new_adapter in new_adapters: self.assertTrue(new_adapter in model.peft_config) key_list = [key for key, _ in model.named_modules() if "lora" not in key] for key in key_list: _, target, _ = _get_submodules(model, key) if isinstance(target, LoraLayer): for adapter_name in new_adapters: if "single" in adapter_name: new_delta_weight = target.get_delta_weight(adapter_name) weighted_original_delta_weights = target.get_delta_weight(adapter_list[0]) * weight_list[0] self.assertTrue( torch.allclose(new_delta_weight, weighted_original_delta_weights, atol=1e-4, rtol=1e-4) ) elif "svd" in adapter_name: self.assertTrue(target.r[adapter_name] == 20) elif "linear" in adapter_name: self.assertTrue(target.r[adapter_name] == 8) for adapter_name in new_adapters: # ensuring new adapters pass the forward loop model.set_adapter(adapter_name) dummy_input = self.prepare_inputs_for_testing() model.eval() _ = model(**dummy_input)[0] def _test_disable_adapter(self, model_id, config_cls, config_kwargs): task_type = config_kwargs.get("task_type") if (task_type == "SEQ_2_SEQ_LM") and (config_cls in (PromptTuningConfig, PromptEncoderConfig)): self.skipTest("Seq2Seq + prompt tuning/prompt encoder does not work with disabling adapters") def get_output(model): # helper function that works with different model types torch.manual_seed(0) if hasattr(model, "generate"): # let's check the scores, not the output ids, since the latter can easily be identical even if the # weights are slightly changed output = model.generate(**input, return_dict_in_generate=True, output_scores=True).scores[0] # take element 0, as output is a tuple else: output = model(**input) if hasattr(output, "images"): # for SD import numpy as np img = output.images[0] return torch.from_numpy(np.array(img)) return output # initialize model model = self.transformers_class.from_pretrained(model_id).to(self.torch_device) # output from BASE MODEL input = self.prepare_inputs_for_testing() output_before = get_output(model) # output from PEFT MODEL if hasattr(self, "instantiate_sd_peft"): # SD models are instantiated differently peft_model = self.instantiate_sd_peft(model_id, config_cls, config_kwargs) else: config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) peft_model = get_peft_model(model, config) output_peft = get_output(peft_model) # first check trivial case is not true that peft does not affect the output; for this to work, init_lora_weight # must be False if isinstance(peft_model, StableDiffusionPipeline): # for SD, check that most pixels have different values self.assertTrue((output_before != output_peft).float().mean() > 0.9) else: self.assertFalse(torch.allclose(output_before, output_peft)) # output with DISABLED ADAPTER if isinstance(peft_model, StableDiffusionPipeline): with peft_model.unet.disable_adapter(): with peft_model.text_encoder.disable_adapter(): output_peft_disabled = get_output(peft_model) # for SD, very rarely, a pixel can differ self.assertTrue((output_before != 
output_peft_disabled).float().mean() < 1e-4) else: with peft_model.disable_adapter(): output_peft_disabled = get_output(peft_model) self.assertTrue(torch.allclose(output_before, output_peft_disabled, atol=1e-6, rtol=1e-6)) # TODO: add tests to check if disabling adapters works after calling merge_adapter def _test_adding_multiple_adapters_with_bias_raises(self, model_id, config_cls, config_kwargs): # When trying to add multiple adapters with bias in Lora or AdaLora, an error should be # raised. Also, the peft model should not be left in a half-initialized state. if not issubclass(config_cls, (LoraConfig, AdaLoraConfig)): return config_kwargs = config_kwargs.copy() config_kwargs["bias"] = "all" config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = self.transformers_class.from_pretrained(model_id) model = get_peft_model(model, config, "adapter0") with self.assertRaises(ValueError): model.add_adapter("adapter1", replace(config, r=20)) # (superficial) test that the model is not left in a half-initialized state when adding an adapter fails self.assertFalse("adapter1" in model.peft_config) self.assertFalse("adapter1" in model.base_model.peft_config) def _test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): # https://github.com/huggingface/peft/issues/727 model = self.transformers_class.from_pretrained(model_id) config = config_cls( base_model_name_or_path=model_id, **config_kwargs, ) model = get_peft_model(model, config, adapter_name="test-adapter").to(self.torch_device) dummy_input = self.prepare_inputs_for_testing() inputs_embeds = model.get_input_embeddings()(dummy_input["input_ids"]) # just check that no error is raised model.forward(inputs_embeds=inputs_embeds)
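
# --- Illustrative sketch of the grid expansion ---
# get_grid_parameters (documented above) turns a small grid dict into one
# (test_name, model_id, config_cls, config_kwargs) tuple per adapter type and kwarg combination.
# The snippet below only prints what would be generated; the model id is one of the tiny test
# models used elsewhere in this suite.
if __name__ == "__main__":
    example_grid = {
        "model_ids": ["hf-internal-testing/tiny-random-BertModel"],
        "lora_kwargs": {"init_lora_weights": [False]},
        "task_type": "FEATURE_EXTRACTION",
    }
    for test_name, model_id, config_cls, config_kwargs in PeftTestConfigManager.get_grid_parameters(example_grid):
        # e.g. test_hf-internal-testing/tiny-random-BertModel_lora LoraConfig {..., 'init_lora_weights': False}
        print(test_name, config_cls.__name__, config_kwargs)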
0
hf_public_repos/peft
hf_public_repos/peft/tests/testing_utils.py
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from contextlib import contextmanager

import numpy as np
import torch

from peft.import_utils import is_auto_gptq_available, is_optimum_available


def require_torch_gpu(test_case):
    """
    Decorator marking a test that requires a GPU. Will be skipped when no GPU is available.
    """
    if not torch.cuda.is_available():
        return unittest.skip("test requires GPU")(test_case)
    else:
        return test_case


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires multiple GPUs. Will be skipped when fewer than 2 GPUs are available.
    """
    if not torch.cuda.is_available() or torch.cuda.device_count() < 2:
        return unittest.skip("test requires multiple GPUs")(test_case)
    else:
        return test_case


def require_bitsandbytes(test_case):
    """
    Decorator marking a test that requires the bitsandbytes library. Will be skipped when the library is not
    installed.
    """
    try:
        import bitsandbytes  # noqa: F401
    except ImportError:
        return unittest.skip("test requires bitsandbytes")(test_case)
    else:
        return test_case


def require_auto_gptq(test_case):
    """
    Decorator marking a test that requires auto-gptq. These tests are skipped when auto-gptq isn't installed.
    """
    return unittest.skipUnless(is_auto_gptq_available(), "test requires auto-gptq")(test_case)


def require_optimum(test_case):
    """
    Decorator marking a test that requires optimum. These tests are skipped when optimum isn't installed.
    """
    return unittest.skipUnless(is_optimum_available(), "test requires optimum")(test_case)


@contextmanager
def temp_seed(seed: int):
    """Temporarily set the random seed for NumPy and PyTorch; the previous RNG state is restored on exit."""
    np_state = np.random.get_state()
    np.random.seed(seed)

    torch_state = torch.random.get_rng_state()
    torch.random.manual_seed(seed)

    if torch.cuda.is_available():
        torch_cuda_states = torch.cuda.get_rng_state_all()
        torch.cuda.manual_seed_all(seed)

    try:
        yield
    finally:
        np.random.set_state(np_state)
        torch.random.set_rng_state(torch_state)
        if torch.cuda.is_available():
            torch.cuda.set_rng_state_all(torch_cuda_states)
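
# --- Illustrative usage of temp_seed ---
# Two draws under the same temporary seed are identical, and the global NumPy / PyTorch RNG
# state is restored once the context exits (this is how test_stablediffusion.py compares
# images before and after merging adapters).
if __name__ == "__main__":
    with temp_seed(42):
        first = torch.rand(3)
    with temp_seed(42):
        second = torch.rand(3)
    assert torch.equal(first, second)  # same seed -> identical draws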
0