import unittest

import torch
from parameterized import parameterized
from transformers import AutoModelForSeq2SeqLM

from .testing_common import PeftCommonTester, PeftTestConfigManager


PEFT_ENCODER_DECODER_MODELS_TO_TEST = [
    "ybelkada/tiny-random-T5ForConditionalGeneration-calibrated",
    "hf-internal-testing/tiny-random-BartForConditionalGeneration",
]
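
# NOTE (assumption, inferred from the test method signatures below):
# PeftTestConfigManager.get_grid_parameters expands a grid like FULL_GRID into one
# (test_name, model_id, config_cls, config_kwargs) tuple per model/PEFT-config combination,
# and parameterized.expand registers each tuple as a separate test case.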
FULL_GRID = {"model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST, "task_type": "SEQ_2_SEQ_LM"}


def skip_non_lora_or_pt(test_list):
    r"""
    Keep only the LoRA and prefix-tuning test cases; every other test case is skipped.
    """
    return [test for test in test_list if ("lora" in test[0] or "prefix_tuning" in test[0])]
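
# For example, an expanded entry whose test_name contains "lora" or "prefix_tuning" is kept,
# while e.g. a "prompt_tuning" entry is filtered out; only the test_name element is inspected.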


class PeftEncoderDecoderModelTester(unittest.TestCase, PeftCommonTester):
    r"""
    Test if the PeftModel behaves as expected. This includes:
    - test if the model has the expected methods

    We use parameterized.expand for debugging purposes to test each model individually.
    """

    transformers_class = AutoModelForSeq2SeqLM
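
    # Roughly, each expanded test case builds a PEFT model like the following sketch
    # (illustrative only -- the real logic lives in the PeftCommonTester helpers):
    #
    #     model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
    #     peft_model = get_peft_model(model, config_cls(task_type="SEQ_2_SEQ_LM", **config_kwargs))
    #
    # and then runs the shared assertions from testing_common against it.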

    def prepare_inputs_for_testing(self):
        input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
        decoder_input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
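        # The 0 in the second row of the attention mask hides that position, so the tests
        # also exercise a non-trivial (padded) attention mask.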
        attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)

        input_dict = {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
        }

        return input_dict

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_model_attr(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
        self._test_prepare_for_training(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
        self._test_save_pretrained(model_id, config_cls, config_kwargs)

    @parameterized.expand(
        PeftTestConfigManager.get_grid_parameters(
            {
                "model_ids": PEFT_ENCODER_DECODER_MODELS_TO_TEST,
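                # init_lora_weights=False initializes the LoRA weights randomly rather than as a
                # no-op, so merging the adapter measurably changes the base model's outputs.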
"lora_kwargs": {"init_lora_weights": [False]}, |
|
"task_type": "SEQ_2_SEQ_LM", |
|
}, |
|
) |
|
) |
|
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): |
|
self._test_merge_layers(model_id, config_cls, config_kwargs) |

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_lora_or_pt))
    def test_generate(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
        self._test_generate_half_prec(model_id, config_cls, config_kwargs)

    @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
    def test_training_encoder_decoders(self, test_name, model_id, config_cls, config_kwargs):
        self._test_training(model_id, config_cls, config_kwargs)
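

# Optional entry point so the file can also be run directly with `python`; the suite is
# normally driven by pytest, which discovers the TestCase above on its own.
if __name__ == "__main__":
    unittest.main()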