Dataset columns:
repo_id: string, 55 distinct values
file_path: string, length 42–186 characters
content: string, length 1–333k characters
__index_level_0__: int64, all values 0
mavonic_private_repos/transformers/tests/quantization
mavonic_private_repos/transformers/tests/quantization/gptq/test_gptq.py
# coding=utf-8 # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import pytest from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig from transformers.testing_utils import ( is_torch_available, require_accelerate, require_auto_gptq, require_optimum, require_torch_gpu, require_torch_multi_gpu, slow, ) if is_torch_available(): import torch class GPTQConfigTest(unittest.TestCase): def test_bits(self): with self.assertRaises(ValueError): GPTQConfig(bits="") GPTQConfig(bits=1) GPTQConfig(bits=2) GPTQConfig(bits=4) def test_dataset(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, dataset="auto_gpt") GPTQConfig(bits=2, dataset="c4") GPTQConfig(bits=2, dataset="ptb-new") def test_damp_percent(self): with self.assertRaises(ValueError): GPTQConfig(bits=2, damp_percent=10) GPTQConfig(bits=2, damp_percent=-1) GPTQConfig(bits=2, damp_percent="0") GPTQConfig(bits=2, damp_percent=0.01) def test_to_dict(self): quantization_config = GPTQConfig(bits=2) quantization_config.to_dict() def test_from_dict(self): dict = {"bits": 2} quantization_config = GPTQConfig.from_dict(dict) self.assertEqual(dict["bits"], quantization_config.bits) @require_optimum def test_optimum_config(self): from optimum.gptq import GPTQQuantizer config = GPTQConfig(bits=2) optimum_config = GPTQQuantizer.from_dict(config.to_dict_optimum()) self.assertEqual(optimum_config.bits, config.bits) new_config = GPTQConfig.from_dict_optimum(optimum_config.to_dict()) self.assertEqual(optimum_config.bits, new_config.bits) @slow @require_optimum @require_auto_gptq @require_torch_gpu class GPTQTest(unittest.TestCase): model_name = "bigscience/bloom-560m" input_text = "Hello my name is" EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a professional photographer and I") EXPECTED_OUTPUTS.add("Hello my name is John, I am a student in the University of") EXPECTED_OUTPUTS.add("Hello my name is John and I am a very good looking man.") EXPECTED_OUTPUTS.add("Hello my name is Alyson, I am a student in the") EXPECTED_OUTPUTS.add("Hello my name is Alyson and I am a very sweet,") # this seems a little small considering that we are doing 4bit quant but we have a small model and ww don't quantize the embeddings EXPECTED_RELATIVE_DIFFERENCE = 1.664253062 bits = 4 group_size = 128 desc_act = False use_exllama = False dataset = [ "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm." 
] device_map = None # called only once for all test in this class @classmethod def setUpClass(cls): """ Setup quantized model """ cls.model_fp16 = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map ) cls.mem_fp16 = cls.model_fp16.get_memory_footprint() cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) quantization_config = GPTQConfig( bits=cls.bits, dataset=cls.dataset, tokenizer=cls.tokenizer, group_size=cls.group_size, desc_act=cls.desc_act, use_exllama=cls.use_exllama, ) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map=cls.device_map, quantization_config=quantization_config, ) def test_memory_footprint(self): r""" A simple test to check if the model conversion has been done correctly by checking on the memory footprint of the converted model """ mem_quantized = self.quantized_model.get_memory_footprint() self.assertAlmostEqual(self.mem_fp16 / mem_quantized, self.EXPECTED_RELATIVE_DIFFERENCE) def test_device_and_dtype_assignment(self): r""" Test whether trying to cast (or assigning a device to) a model after quantization will throw an error. Checks also if other models are casted correctly. """ # This should work if self.device_map is None: _ = self.quantized_model.to(0) with self.assertRaises(ValueError): # Tries with a `dtype`` self.quantized_model.to(torch.float16) def test_original_dtype(self): r""" A simple test to check if the model succesfully stores the original dtype """ self.assertTrue(hasattr(self.quantized_model.config, "_pre_quantization_dtype")) self.assertFalse(hasattr(self.model_fp16.config, "_pre_quantization_dtype")) self.assertTrue(self.quantized_model.config._pre_quantization_dtype == torch.float16) def test_quantized_layers_class(self): """ Simple test to check if the model conversion has been done correctly by checking on the class type of the linear layers of the converted models """ from auto_gptq.utils.import_utils import dynamically_import_QuantLinear QuantLinear = dynamically_import_QuantLinear( use_triton=False, desc_act=self.desc_act, group_size=self.group_size, bits=self.bits, disable_exllama=not self.use_exllama, disable_exllamav2=True, ) self.assertTrue(self.quantized_model.transformer.h[0].mlp.dense_4h_to_h.__class__ == QuantLinear) def check_inference_correctness(self, model): r""" Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. 
""" # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def check_quantized_layers_type(self, model, value): self.assertTrue(model.transformer.h[0].mlp.dense_4h_to_h.QUANT_TYPE == value) def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ if self.device_map is None: self.check_inference_correctness(self.quantized_model.to(0)) else: self.check_inference_correctness(self.quantized_model) def test_serialization(self): """ Test the serialization of the model and the loading of the quantized weights works """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=False, bits=4) ).to(0) self.check_quantized_layers_type(quantized_model_from_saved, "cuda-old") else: # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map={"": 0}) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate def test_serialization_big_model_inference(self): """ Test the serialization of the model and the loading of the quantized weights with big model inference """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) quantized_model_from_saved = AutoModelForCausalLM.from_pretrained(tmpdirname, device_map="auto") self.check_inference_correctness(quantized_model_from_saved) def test_change_loading_attributes(self): """ Test the serialization of the model and the loading of the quantized weights works with another config file """ with tempfile.TemporaryDirectory() as tmpdirname: self.quantized_model.save_pretrained(tmpdirname) if not self.use_exllama: self.check_quantized_layers_type(self.quantized_model, "cuda-old") # we need to put it directly to the gpu. Otherwise, we won't be able to initialize the exllama kernel quantized_model_from_saved = AutoModelForCausalLM.from_pretrained( tmpdirname, quantization_config=GPTQConfig(use_exllama=True, bits=4), device_map={"": 0} ) self.assertEqual(quantized_model_from_saved.config.quantization_config.bits, self.bits) self.check_quantized_layers_type(quantized_model_from_saved, "exllama") self.check_inference_correctness(quantized_model_from_saved) @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMap(GPTQTest): device_map = "auto" @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapExllama(GPTQTest): device_map = "auto" use_exllama = True @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestActOrderExllama(unittest.TestCase): """ Test GPTQ model with exllama kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? 
I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" @classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, max_input_length=4028) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllama") def test_generate_quality(self): """ Simple test to check the quality of the model by comparing the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) def test_max_input_length(self): """ Test if the max_input_length works. It modifies the maximum input length that of the model that runs with exllama backend. """ prompt = "I am in Paris and" * 1000 inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] > 4028) with self.assertRaises(RuntimeError) as cm: self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) self.assertTrue("temp_state buffer is too small" in str(cm.exception)) prompt = "I am in Paris and" inp = self.tokenizer(prompt, return_tensors="pt").to(0) self.assertTrue(inp["input_ids"].shape[1] < 4028) self.quantized_model.generate(**inp, num_beams=1, min_new_tokens=3, max_new_tokens=3) @slow @require_optimum @require_auto_gptq @require_torch_gpu @require_accelerate class GPTQTestExllamaV2(unittest.TestCase): """ Test GPTQ model with exllamav2 kernel and desc_act=True (also known as act-order). More information on those arguments here: https://huggingface.co/docs/transformers/main_classes/quantization#transformers.GPTQConfig """ EXPECTED_OUTPUTS = set() EXPECTED_OUTPUTS.add("Hello, how are you ? I'm doing good, thanks for asking.") # 4bit + act_order + 128g model_name = "hf-internal-testing/TinyLlama-1.1B-Chat-v0.3-GPTQ" input_text = "Hello, how are you ?" 
@classmethod def setUpClass(cls): """ Setup quantized model """ cls.quantization_config = GPTQConfig(bits=4, exllama_config={"version": 2}) cls.quantized_model = AutoModelForCausalLM.from_pretrained( cls.model_name, torch_dtype=torch.float16, device_map={"": 0}, quantization_config=cls.quantization_config, ) cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, use_fast=True) def test_quantized_layers_type(self): self.assertTrue(self.quantized_model.model.layers[0].self_attn.k_proj.QUANT_TYPE == "exllamav2") def check_inference_correctness(self, model): """ Test the generation quality of the quantized model and see that we are matching the expected output. Given that we are operating on small numbers + the testing model is relatively small, we might not get the same output across GPUs. So we'll generate few tokens (5-10) and check their output. """ # Check that inference pass works on the model encoded_input = self.tokenizer(self.input_text, return_tensors="pt") # Check the exactness of the results output_sequences = model.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10) # Get the generation self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS) def test_generate_quality(self): """ Simple test to check the quality of the model by comapring the the generated tokens with the expected tokens """ self.check_inference_correctness(self.quantized_model) # fail when run all together @pytest.mark.skip @require_accelerate @require_torch_multi_gpu class GPTQTestDeviceMapCPUOffload(GPTQTest): device_map = { "transformer.word_embeddings": 0, "transformer.word_embeddings_layernorm": 0, "lm_head": 0, "transformer.h.0": 0, "transformer.h.1": 0, "transformer.h.2": 0, "transformer.h.3": 0, "transformer.h.4": 0, "transformer.h.5": 0, "transformer.h.6": 0, "transformer.h.7": 0, "transformer.h.8": 0, "transformer.h.9": 0, "transformer.h.10": 1, "transformer.h.11": 1, "transformer.h.12": 1, "transformer.h.13": 1, "transformer.h.14": 1, "transformer.h.15": 1, "transformer.h.16": 1, "transformer.h.17": 0, "transformer.h.18": "cpu", "transformer.h.19": "cpu", "transformer.h.20": "cpu", "transformer.h.21": "cpu", "transformer.h.22": "cpu", "transformer.h.23": 1, "transformer.ln_f": 0, }
0
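For reference, a minimal sketch of the GPTQ quantization flow that tests/quantization/gptq/test_gptq.py above exercises. It simply mirrors `GPTQTest.setUpClass`, assuming a CUDA GPU and the `optimum` and `auto-gptq` packages are installed:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, GPTQConfig

model_name = "bigscience/bloom-560m"  # same checkpoint the tests quantize
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)

# 4-bit GPTQ with the one-sentence calibration dataset used by the test class
quantization_config = GPTQConfig(
    bits=4,
    group_size=128,
    dataset=[
        "auto-gptq is an easy-to-use model quantization library with user-friendly apis, based on GPTQ algorithm."
    ],
    tokenizer=tokenizer,
)
quantized_model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
    quantization_config=quantization_config,
)
# The tests expect a memory footprint roughly 1.66x smaller than the fp16 model
print(quantized_model.get_memory_footprint())
```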
mavonic_private_repos/transformers/tests/quantization
mavonic_private_repos/transformers/tests/quantization/hqq/test_hqq.py
# coding=utf-8
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

from transformers import AutoModelForCausalLM, AutoTokenizer, HqqConfig
from transformers.testing_utils import (
    require_accelerate,
    require_torch_gpu,
    require_torch_multi_gpu,
    slow,
    torch_device,
)
from transformers.utils import is_hqq_available, is_torch_available


if is_torch_available():
    import torch

if is_hqq_available():
    from hqq.core.quantize import HQQBackend, HQQLinear


class HQQLLMRunner:
    def __init__(self, model_id, quant_config, compute_dtype, device, cache_dir=None):
        self.model = AutoModelForCausalLM.from_pretrained(
            model_id,
            torch_dtype=compute_dtype,
            device_map=device,
            quantization_config=quant_config,
            low_cpu_mem_usage=True,
            cache_dir=cache_dir,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_id, cache_dir=cache_dir)
        self.device = self.model.device
        HQQLinear.set_backend(HQQBackend.PYTORCH)


def cleanup():
    torch.cuda.empty_cache()
    gc.collect()


def check_hqqlayer(test_module, hqq_layer, batch_size=1, context_size=1024):
    # Test HQQ layer
    W_dequant = hqq_layer.dequantize()  # Reconstructed weights
    inputs = (
        torch.randn(
            (batch_size, context_size, hqq_layer.meta["shape"][1]),
            device=hqq_layer.device,
            dtype=hqq_layer.compute_dtype,
        )
        / 10.0
    )
    with torch.no_grad():
        outputs = hqq_layer(inputs)
    test_module.assertEqual(outputs.shape[-1], W_dequant.shape[0])
    test_module.assertEqual(outputs.dtype, hqq_layer.compute_dtype)
    del W_dequant, inputs, outputs
    cleanup()


def check_forward(test_module, model, batch_size=1, context_size=1024):
    # Test forward pass
    with torch.no_grad():
        out = model(torch.zeros([batch_size, context_size], device=model.device, dtype=torch.int32)).logits
    test_module.assertEqual(out.shape[0], batch_size)
    test_module.assertEqual(out.shape[1], context_size)
    cleanup()


MODEL_ID = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"


@require_torch_gpu
class HqqConfigTest(unittest.TestCase):
    def test_to_dict(self):
        """
        Makes sure the config format is properly set
        """
        quantization_config = HqqConfig()
        hqq_orig_config = quantization_config.to_dict()

        for key in hqq_orig_config:
            self.assertEqual(quantization_config.quant_config[key], hqq_orig_config[key])


@slow
@require_torch_gpu
@require_accelerate
class HQQTest(unittest.TestCase):
    def tearDown(self):
        cleanup()

    def test_fp16_quantized_model(self):
        """
        Simple LLM model testing fp16
        """
        quant_config = HqqConfig(nbits=8, group_size=64, quant_zero=False, quant_scale=False, axis=0)

        hqq_runner = HQQLLMRunner(
            model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
        )

        check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
        check_forward(self, hqq_runner.model)

    def test_f16_quantized_model_with_offloading(self):
        """
        Simple LLM model testing fp16 with meta-data offloading
        """
        q4_config = {"nbits": 4, "group_size": 64, "quant_zero": False, "quant_scale": False}
        q3_config = {"nbits": 3, "group_size": 32, "quant_zero": False, "quant_scale": False, "offload_meta": True}
        quant_config = HqqConfig(
            dynamic_config={
                "self_attn.q_proj": q4_config,
                "self_attn.k_proj": q4_config,
                "self_attn.v_proj": q4_config,
                "self_attn.o_proj": q4_config,
                "mlp.gate_proj": q3_config,
                "mlp.up_proj": q3_config,
                "mlp.down_proj": q3_config,
            }
        )

        hqq_runner = HQQLLMRunner(
            model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device=torch_device
        )

        check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
        check_forward(self, hqq_runner.model)


@slow
@require_torch_gpu
@require_torch_multi_gpu
@require_accelerate
class HQQTestMultiGPU(unittest.TestCase):
    def tearDown(self):
        cleanup()

    def test_fp16_quantized_model_multipgpu(self):
        """
        Simple LLM model testing fp16 with multi-gpu
        """
        quant_config = HqqConfig(nbits=8, group_size=64, quant_zero=False, quant_scale=False, axis=0)

        hqq_runner = HQQLLMRunner(
            model_id=MODEL_ID, quant_config=quant_config, compute_dtype=torch.float16, device="auto"
        )

        check_hqqlayer(self, hqq_runner.model.model.layers[0].self_attn.v_proj)
        check_forward(self, hqq_runner.model)
0
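Likewise, a minimal sketch of the HQQ path covered by tests/quantization/hqq/test_hqq.py, assuming a CUDA GPU and the `hqq` package; it follows `HQQTest.test_fp16_quantized_model`:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, HqqConfig

model_id = "TinyLlama/TinyLlama-1.1B-Chat-v1.0"  # MODEL_ID used by the tests

# 8-bit HQQ quantization with the same settings as the test
quant_config = HqqConfig(nbits=8, group_size=64, quant_zero=False, quant_scale=False, axis=0)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,
    device_map="cuda",
    quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# A quick forward pass, as check_forward() does in the test file
with torch.no_grad():
    logits = model(torch.zeros([1, 16], device=model.device, dtype=torch.int32)).logits
print(logits.shape)
```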
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/optimization/test_optimization.py
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch
    from torch import nn

    from transformers import (
        Adafactor,
        AdamW,
        get_constant_schedule,
        get_constant_schedule_with_warmup,
        get_cosine_schedule_with_warmup,
        get_cosine_with_hard_restarts_schedule_with_warmup,
        get_inverse_sqrt_schedule,
        get_linear_schedule_with_warmup,
        get_polynomial_decay_schedule_with_warmup,
        get_wsd_schedule,
    )


def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs


@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors, so we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors, so we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)


@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
            get_wsd_schedule: (
                {"num_warmup_steps": 2, "num_stable_steps": 2, "num_decay_steps": 3, "min_lr_ratio": 0.1},
                [0.0, 5.0, 10.0, 10.0, 10.0, 7.75, 3.25, 1.0, 1.0, 1.0],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """See https://github.com/huggingface/transformers/issues/21689"""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(self, scheduler):
        scheduler.lr_lambdas = list(map(self, scheduler.lr_lambdas))
0
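A small standalone sketch of the scheduler behaviour asserted in tests/optimization/test_optimization.py, using the same warmup and training-step settings as `ScheduleInitTest.test_schedulers`:

```python
import torch
from torch import nn
from transformers import AdamW, get_linear_schedule_with_warmup

model = nn.Linear(50, 50)
optimizer = AdamW(model.parameters(), lr=10.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)

# Record the learning rate at each step, as unwrap_schedule() does in the test file
lrs = []
for _ in range(10):
    lrs.append(scheduler.get_lr()[0])
    scheduler.step()

# Expected (to ~1e-2): [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]
print(lrs)
```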
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/optimization/test_optimization_tf.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from transformers import is_tf_available
from transformers.testing_utils import require_tf


if is_tf_available():
    import tensorflow as tf
    from tensorflow.python.eager import context
    from tensorflow.python.framework import ops

    from transformers import GradientAccumulator, create_optimizer


@require_tf
class OptimizationFTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def testGradientAccumulator(self):
        accumulator = GradientAccumulator()
        accumulator([tf.constant([1.0, 2.0])])
        accumulator([tf.constant([-2.0, 1.0])])
        accumulator([tf.constant([-1.0, 2.0])])
        with self.assertRaises(ValueError):
            accumulator([tf.constant([1.0, 1.0]), tf.constant([2.0, 2.0])])
        self.assertEqual(accumulator.step, 3)
        self.assertEqual(len(accumulator.gradients), 1)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [-2.0, 5.0], tol=1e-2)
        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist(), [0.0, 0.0], tol=1e-2)

    def testGradientAccumulatorDistributionStrategy(self):
        context._context = None
        ops.enable_eager_execution_internal()
        physical_devices = tf.config.list_physical_devices("CPU")
        if len(physical_devices) == 1:
            tf.config.set_logical_device_configuration(
                physical_devices[0], [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()]
            )
        devices = tf.config.list_logical_devices(device_type="CPU")
        strategy = tf.distribute.MirroredStrategy(devices=devices[:2])

        with strategy.scope():
            accumulator = GradientAccumulator()
            variable = tf.Variable([4.0, 3.0])
            optimizer, _ = create_optimizer(5e-5, 10, 5)
            gradient_placeholder = tf.Variable([0.0, 0.0], trainable=False)

        def accumulate_on_replica(gradient):
            accumulator([gradient])

        def apply_on_replica():
            optimizer.apply_gradients(list(zip(accumulator.gradients, [variable])))

        @tf.function
        def accumulate(grad1, grad2):
            with strategy.scope():
                local_variables = strategy.experimental_local_results(gradient_placeholder)
                local_variables[0].assign(grad1)
                local_variables[1].assign(grad2)
                strategy.run(accumulate_on_replica, args=(gradient_placeholder,))

        @tf.function
        def apply_grad():
            with strategy.scope():
                strategy.run(apply_on_replica)

        def _check_local_values(grad1, grad2):
            values = strategy.experimental_local_results(accumulator._gradients[0])
            self.assertListAlmostEqual(values[0].value(), grad1, tol=1e-2)
            self.assertListAlmostEqual(values[1].value(), grad2, tol=1e-2)

        accumulate([1.0, 2.0], [-1.0, 1.0])
        accumulate([3.0, -1.0], [-1.0, -1.0])
        accumulate([-2.0, 2.0], [3.0, -2.0])
        self.assertEqual(accumulator.step, 3)
        _check_local_values([2.0, 3.0], [1.0, -2.0])

        apply_grad()
        self.assertListAlmostEqual(variable.value(), [4.0, 3.0], tol=1e-2)

        accumulator.reset()
        self.assertEqual(accumulator.step, 0)
        _check_local_values([0.0, 0.0], [0.0, 0.0])
0
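A brief sketch of the single-replica `GradientAccumulator` usage checked in tests/optimization/test_optimization_tf.py, assuming TensorFlow and this transformers snapshot are installed:

```python
import tensorflow as tf
from transformers import GradientAccumulator

accumulator = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0])])
accumulator([tf.constant([-2.0, 1.0])])
accumulator([tf.constant([-1.0, 2.0])])

print(accumulator.step)                  # 3 accumulation calls so far
print(accumulator.gradients[0].numpy())  # [-2. 5.], the element-wise sum of the gradients

accumulator.reset()                      # zeroes the buffers and the step counter
```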
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/peft_integration/test_peft_integration.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from huggingface_hub import hf_hub_download from transformers import AutoModelForCausalLM, OPTForCausalLM from transformers.testing_utils import ( require_bitsandbytes, require_peft, require_torch, require_torch_gpu, slow, torch_device, ) from transformers.utils import is_torch_available if is_torch_available(): import torch @require_peft @require_torch class PeftTesterMixin: peft_test_model_ids = ("peft-internal-testing/tiny-OPTForCausalLM-lora",) transformers_test_model_ids = ("hf-internal-testing/tiny-random-OPTForCausalLM",) transformers_test_model_classes = (AutoModelForCausalLM, OPTForCausalLM) # TODO: run it with CI after PEFT release. @slow class PeftIntegrationTester(unittest.TestCase, PeftTesterMixin): """ A testing suite that makes sure that the PeftModel class is correctly integrated into the transformers library. """ def _check_lora_correctly_converted(self, model): """ Utility method to check if the model has correctly adapters injected on it. """ from peft.tuners.tuners_utils import BaseTunerLayer is_peft_loaded = False for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break return is_peft_loaded def test_peft_from_pretrained(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This checks if we pass a remote folder that contains an adapter config and adapter weights, it should correctly load a model that has adapters injected on it. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) self.assertTrue(peft_model._hf_peft_config_loaded) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_state_dict(self): """ Simple test that checks if the returned state dict of `get_adapter_state_dict()` method contains the expected keys. """ for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) state_dict = peft_model.get_adapter_state_dict() for key in state_dict.keys(): self.assertTrue("lora" in key) def test_peft_save_pretrained(self): """ Test that checks various combinations of `save_pretrained` with a model that has adapters loaded on it. This checks if the saved model contains the expected files (adapter weights and adapter config). 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("config.json" not in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) peft_model = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(peft_model)) def test_peft_enable_disable_adapters(self): """ A test that checks if `enable_adapters` and `disable_adapters` methods work as expected. """ from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) peft_model.add_adapter(peft_config) peft_logits = peft_model(dummy_input).logits peft_model.disable_adapters() peft_logits_disabled = peft_model(dummy_input).logits peft_model.enable_adapters() peft_logits_enabled = peft_model(dummy_input).logits self.assertTrue(torch.allclose(peft_logits, peft_logits_enabled, atol=1e-12, rtol=1e-12)) self.assertFalse(torch.allclose(peft_logits_enabled, peft_logits_disabled, atol=1e-12, rtol=1e-12)) def test_peft_add_adapter(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) def test_peft_add_adapter_from_pretrained(self): """ Simple test that tests if `add_adapter` works as expected """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model_from_pretrained = transformers_class.from_pretrained(tmpdirname).to(torch_device) self.assertTrue(self._check_lora_correctly_converted(model_from_pretrained)) def test_peft_add_adapter_modules_to_save(self): """ Simple test that tests if `add_adapter` works as expected when training with modules to save. 
""" from peft import LoraConfig from peft.utils import ModulesToSaveWrapper for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False, modules_to_save=["lm_head"]) model.add_adapter(peft_config) self._check_lora_correctly_converted(model) _has_modules_to_save_wrapper = False for name, module in model.named_modules(): if isinstance(module, ModulesToSaveWrapper): _has_modules_to_save_wrapper = True self.assertTrue(module.modules_to_save.default.weight.requires_grad) self.assertTrue("lm_head" in name) break self.assertTrue(_has_modules_to_save_wrapper) state_dict = model.get_adapter_state_dict() self.assertTrue("lm_head.weight" in state_dict.keys()) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for _, param in model.named_parameters(): if param.requires_grad: self.assertTrue(param.grad is not None) def test_peft_add_adapter_training_gradient_checkpointing(self): """ Simple test that tests if `add_adapter` works as expected when training with gradient checkpointing. """ from peft import LoraConfig for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) self.assertTrue(self._check_lora_correctly_converted(model)) # When attaching adapters the input embeddings will stay frozen, this will # lead to the output embedding having requires_grad=False. dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(frozen_output.requires_grad is False) model.gradient_checkpointing_enable() # Since here we attached the hook, the input should have requires_grad to set # properly non_frozen_output = model.get_input_embeddings()(dummy_input) self.assertTrue(non_frozen_output.requires_grad is True) # To repro the Trainer issue dummy_input.requires_grad = False for name, param in model.named_parameters(): if "lora" in name.lower(): self.assertTrue(param.requires_grad) logits = model(dummy_input).logits loss = logits.mean() loss.backward() for name, param in model.named_parameters(): if param.requires_grad: self.assertTrue("lora" in name.lower()) self.assertTrue(param.grad is not None) def test_peft_add_multi_adapter(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected in multi-adapter setting. 
""" from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id in self.transformers_test_model_ids: for transformers_class in self.transformers_test_model_classes: is_peft_loaded = False model = transformers_class.from_pretrained(model_id).to(torch_device) logits_original_model = model(dummy_input).logits peft_config = LoraConfig(init_lora_weights=False) model.add_adapter(peft_config) logits_adapter_1 = model(dummy_input) model.add_adapter(peft_config, adapter_name="adapter-2") logits_adapter_2 = model(dummy_input) for _, m in model.named_modules(): if isinstance(m, BaseTunerLayer): is_peft_loaded = True break self.assertTrue(is_peft_loaded) # dummy generation _ = model.generate(input_ids=dummy_input) model.set_adapter("default") self.assertTrue(model.active_adapters() == ["default"]) self.assertTrue(model.active_adapter() == "default") model.set_adapter("adapter-2") self.assertTrue(model.active_adapters() == ["adapter-2"]) self.assertTrue(model.active_adapter() == "adapter-2") # Logits comparison self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_2.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse(torch.allclose(logits_original_model, logits_adapter_2.logits, atol=1e-6, rtol=1e-6)) model.set_adapter(["adapter-2", "default"]) self.assertTrue(model.active_adapters() == ["adapter-2", "default"]) self.assertTrue(model.active_adapter() == "adapter-2") logits_adapter_mixed = model(dummy_input) self.assertFalse( torch.allclose(logits_adapter_1.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) self.assertFalse( torch.allclose(logits_adapter_2.logits, logits_adapter_mixed.logits, atol=1e-6, rtol=1e-6) ) # multi active adapter saving not supported with self.assertRaises(ValueError), tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) @require_torch_gpu @require_bitsandbytes def test_peft_from_pretrained_kwargs(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained` + additional kwargs and see if the integraiton behaves as expected. 
""" for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) # dummy generation _ = peft_model.generate(input_ids=torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device)) @require_torch_gpu @require_bitsandbytes def test_peft_save_quantized(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models """ # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname) self.assertTrue("adapter_model.safetensors" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) @require_torch_gpu @require_bitsandbytes def test_peft_save_quantized_regression(self): """ Simple test that tests the basic usage of PEFT model save_pretrained with quantized base models Regression test to make sure everything works as expected before the safetensors integration. 
""" # 4bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_4bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear4bit") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) # 8-bit for model_id in self.peft_test_model_ids: for transformers_class in self.transformers_test_model_classes: peft_model = transformers_class.from_pretrained(model_id, load_in_8bit=True, device_map="auto") module = peft_model.model.decoder.layers[0].self_attn.v_proj self.assertTrue(module.__class__.__name__ == "Linear8bitLt") self.assertTrue(peft_model.hf_device_map is not None) with tempfile.TemporaryDirectory() as tmpdirname: peft_model.save_pretrained(tmpdirname, safe_serialization=False) self.assertTrue("adapter_model.bin" in os.listdir(tmpdirname)) self.assertTrue("adapter_config.json" in os.listdir(tmpdirname)) self.assertTrue("pytorch_model.bin" not in os.listdir(tmpdirname)) self.assertTrue("model.safetensors" not in os.listdir(tmpdirname)) def test_peft_pipeline(self): """ Simple test that tests the basic usage of PEFT model + pipeline """ from transformers import pipeline for model_id in self.peft_test_model_ids: pipe = pipeline("text-generation", model_id) _ = pipe("Hello") def test_peft_add_adapter_with_state_dict(self): """ Simple test that tests the basic usage of PEFT model through `from_pretrained`. This test tests if add_adapter works as expected with a state_dict being passed. 
""" from peft import LoraConfig dummy_input = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]]).to(torch_device) for model_id, peft_model_id in zip(self.transformers_test_model_ids, self.peft_test_model_ids): for transformers_class in self.transformers_test_model_classes: model = transformers_class.from_pretrained(model_id).to(torch_device) peft_config = LoraConfig(init_lora_weights=False) with self.assertRaises(ValueError): model.load_adapter(peft_model_id=None) state_dict_path = hf_hub_download(peft_model_id, "adapter_model.bin") dummy_state_dict = torch.load(state_dict_path) model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=peft_config) with self.assertRaises(ValueError): model.load_adapter(model.load_adapter(adapter_state_dict=dummy_state_dict, peft_config=None)) self.assertTrue(self._check_lora_correctly_converted(model)) # dummy generation _ = model.generate(input_ids=dummy_input) def test_peft_from_pretrained_hub_kwargs(self): """ Tests different combinations of PEFT model + from_pretrained + hub kwargs """ peft_model_id = "peft-internal-testing/tiny-opt-lora-revision" # This should not work with self.assertRaises(OSError): _ = AutoModelForCausalLM.from_pretrained(peft_model_id) adapter_kwargs = {"revision": "test"} # This should work model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) adapter_kwargs = {"revision": "main", "subfolder": "test_subfolder"} model = AutoModelForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model)) model = OPTForCausalLM.from_pretrained(peft_model_id, adapter_kwargs=adapter_kwargs) self.assertTrue(self._check_lora_correctly_converted(model))
0
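For context, a minimal sketch of the adapter workflow exercised by tests/peft_integration/test_peft_integration.py, using the same tiny test checkpoint and assuming `peft` is installed; it follows `test_peft_enable_disable_adapters`:

```python
import torch
from peft import LoraConfig
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-OPTForCausalLM")
model.add_adapter(LoraConfig(init_lora_weights=False))  # inject LoRA layers into the base model

input_ids = torch.LongTensor([[0, 1, 2, 3, 4, 5, 6, 7]])
with_adapter = model(input_ids).logits

model.disable_adapters()
without_adapter = model(input_ids).logits
model.enable_adapters()

# With init_lora_weights=False the adapter is not a no-op, so the logits differ
print(torch.allclose(with_adapter, without_adapter))  # False
```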
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_accelerator, require_torch_gpu, require_torch_or_tf, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def test_small_model_pt(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="pt") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], ) outputs = text_generator(["This is a test", "This is a second test"]) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ], ) outputs = text_generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ) ## -- test tokenizer_kwargs test_str = "testing tokenizer kwargs. using truncation must result in a different generation." input_len = len(text_generator.tokenizer(test_str)["input_ids"]) output_str, output_str_with_truncation = ( text_generator(test_str, do_sample=False, return_full_text=False, min_new_tokens=1)[0]["generated_text"], text_generator( test_str, do_sample=False, return_full_text=False, min_new_tokens=1, truncation=True, max_length=input_len + 1, )[0]["generated_text"], ) assert output_str != output_str_with_truncation # results must be different because one had truncation # -- what is the point of this test? 
padding is hardcoded False in the pipeline anyway text_generator.tokenizer.pad_token_id = text_generator.model.config.eos_token_id text_generator.tokenizer.pad_token = "<pad>" outputs = text_generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], [ {"generated_token_ids": ANY(list)}, {"generated_token_ids": ANY(list)}, ], ], ) @require_torch def test_small_chat_model_pt(self): text_generator = pipeline( task="text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template", framework="pt" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, {"role": "assistant", "content": "This is a reply"}, ] chat2 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a second test"}, {"role": "assistant", "content": "This is a reply"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10) expected_chat1 = chat1 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ {"generated_text": expected_chat1}, ], ) outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10) expected_chat2 = chat2 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ [{"generated_text": expected_chat1}], [{"generated_text": expected_chat2}], ], ) @require_torch def test_small_chat_model_with_dataset_pt(self): from torch.utils.data import Dataset from transformers.pipelines.pt_utils import KeyDataset class MyDataset(Dataset): data = [ [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, {"role": "assistant", "content": "This is a reply"}, ], ] def __len__(self): return 1 def __getitem__(self, i): return {"text": self.data[i]} text_generator = pipeline( task="text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template", framework="pt" ) dataset = MyDataset() key_dataset = KeyDataset(dataset, "text") for outputs in text_generator(key_dataset, do_sample=False, max_new_tokens=10): expected_chat = dataset.data[0] + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ {"generated_text": expected_chat}, ], ) @require_tf def test_small_model_tf(self): text_generator = pipeline(task="text-generation", model="sshleifer/tiny-ctrl", framework="tf") # Using `do_sample=False` to force deterministic output outputs = text_generator("This is a test", do_sample=False) self.assertEqual( outputs, [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], ) outputs = text_generator(["This is a test", "This is a second test"], do_sample=False) self.assertEqual( outputs, [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ], ) @require_tf def test_small_chat_model_tf(self): text_generator = 
pipeline( task="text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template", framework="tf" ) # Using `do_sample=False` to force deterministic output chat1 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a test"}, {"role": "assistant", "content": "This is a reply"}, ] chat2 = [ {"role": "system", "content": "This is a system message."}, {"role": "user", "content": "This is a second test"}, {"role": "assistant", "content": "This is a reply"}, ] outputs = text_generator(chat1, do_sample=False, max_new_tokens=10) expected_chat1 = chat1 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ {"generated_text": expected_chat1}, ], ) outputs = text_generator([chat1, chat2], do_sample=False, max_new_tokens=10) expected_chat2 = chat2 + [ { "role": "assistant", "content": " factors factors factors factors factors factors factors factors factors factors", } ] self.assertEqual( outputs, [ [{"generated_text": expected_chat1}], [{"generated_text": expected_chat2}], ], ) def get_test_pipeline(self, model, tokenizer, processor): text_generator = TextGenerationPipeline(model=model, tokenizer=tokenizer) return text_generator, ["This is a test", "Another test"] def test_stop_sequence_stopping_criteria(self): prompt = """Hello I believe in""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") output = text_generator(prompt) self.assertEqual( output, [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}], ) output = text_generator(prompt, stop_sequence=" fe") self.assertEqual(output, [{"generated_text": "Hello I believe in fe"}]) def run_pipeline_test(self, text_generator, _): model = text_generator.model tokenizer = text_generator.tokenizer outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator("This is a test", return_full_text=False) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) text_generator = pipeline(task="text-generation", model=model, tokenizer=tokenizer, return_full_text=False) outputs = text_generator("This is a test") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertNotIn("This is a test", outputs[0]["generated_text"]) outputs = text_generator("This is a test", return_full_text=True) self.assertEqual(outputs, [{"generated_text": ANY(str)}]) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test")) outputs = text_generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) if text_generator.tokenizer.pad_token is not None: outputs = text_generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): outputs = text_generator("test", return_full_text=True, return_text=True) with self.assertRaises(ValueError): outputs = text_generator("test", return_full_text=True, return_tensors=True) with 
self.assertRaises(ValueError): outputs = text_generator("test", return_text=True, return_tensors=True) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): outputs = text_generator("") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) else: with self.assertRaises((ValueError, AssertionError)): outputs = text_generator("") if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS = [ "RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM", "FuyuForCausalLM", ] if ( tokenizer.model_max_length < 10000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError)): text_generator("This is a test" * 500, max_new_tokens=20) outputs = text_generator("This is a test" * 500, handle_long_generation="hole", max_new_tokens=20) # Hole strategy cannot work with self.assertRaises(ValueError): text_generator( "This is a test" * 500, handle_long_generation="hole", max_new_tokens=tokenizer.model_max_length + 10, ) @require_torch @require_accelerate @require_torch_gpu def test_small_model_pt_bloom_accelerate(self): import torch # Classic `model_kwargs` pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloat16}, ) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) 
pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto", torch_dtype=torch.bfloat16) self.assertEqual(pipe.model.lm_head.weight.dtype, torch.bfloat16) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 pipe = pipeline(model="hf-internal-testing/tiny-random-bloom", device_map="auto") self.assertEqual(pipe.model.lm_head.weight.dtype, torch.float32) out = pipe("This is a test") self.assertEqual( out, [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ], ) @require_torch @require_torch_accelerator def test_small_model_fp16(self): import torch pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", device=torch_device, torch_dtype=torch.float16, ) pipe("This is a test") @require_torch @require_accelerate @require_torch_accelerator def test_pipeline_accelerate_top_p(self): import torch pipe = pipeline( model="hf-internal-testing/tiny-random-bloom", device_map=torch_device, torch_dtype=torch.float16 ) pipe("This is a test", do_sample=True, top_p=0.5) def test_pipeline_length_setting_warning(self): prompt = """Hello world""" text_generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-gpt2") if text_generator.model.framework == "tf": logger = logging.get_logger("transformers.generation.tf_utils") else: logger = logging.get_logger("transformers.generation.utils") logger_msg = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10, max_new_tokens=1) self.assertIn(logger_msg, cl.out) # The user only sets one -> no warning with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_new_tokens=1) self.assertNotIn(logger_msg, cl.out) with CaptureLogger(logger) as cl: _ = text_generator(prompt, max_length=10) self.assertNotIn(logger_msg, cl.out)
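The chat-format tests above exercise the same call pattern a user would write directly. A minimal sketch, assuming the tiny chat-templated checkpoint from the tests (a tiny random model, so the generated text itself is not meaningful):

from transformers import pipeline

# Minimal text-generation usage mirroring test_small_chat_model_pt above.
generator = pipeline("text-generation", model="rocketknight1/tiny-gpt2-with-chatml-template")

chat = [
    {"role": "system", "content": "This is a system message."},
    {"role": "user", "content": "This is a test"},
]
# do_sample=False keeps the output deterministic, as in the tests.
outputs = generator(chat, do_sample=False, max_new_tokens=10)
# The result is the input chat with the assistant's reply appended.
print(outputs[0]["generated_text"])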
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_audio_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING from transformers.pipelines import AudioClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torchaudio, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class AudioClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): audio_classifier = AudioClassificationPipeline(model=model, feature_extractor=processor) # test with a raw waveform audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return audio_classifier, [audio2, audio] def run_pipeline_test(self, audio_classifier, examples): audio2, audio = examples output = audio_classifier(audio) # by default a model is initialized with num_labels=2 self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) output = audio_classifier(audio, top_k=1) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, ], ) self.run_torchaudio(audio_classifier) @require_torchaudio def run_torchaudio(self, audio_classifier): import datasets # test with a local file dataset = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") audio = dataset[0]["audio"]["array"] output = audio_classifier(audio) self.assertEqual( output, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) @require_torch def test_small_model_pt(self): model = "anton-l/wav2vec2-random-tiny-classifier" audio_classifier = pipeline("audio-classification", model=model) audio = np.ones((8000,)) output = audio_classifier(audio, top_k=4) EXPECTED_OUTPUT = [ {"score": 0.0842, "label": "no"}, {"score": 0.0838, "label": "up"}, {"score": 0.0837, "label": "go"}, {"score": 0.0834, "label": "right"}, ] EXPECTED_OUTPUT_PT_2 = [ {"score": 0.0845, "label": "stop"}, {"score": 0.0844, "label": "on"}, {"score": 0.0841, "label": "right"}, {"score": 0.0834, "label": "left"}, ] self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) audio_dict = {"array": np.ones((8000,)), "sampling_rate": audio_classifier.feature_extractor.sampling_rate} output = audio_classifier(audio_dict, top_k=4) self.assertIn(nested_simplify(output, decimals=4), [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2]) @require_torch @slow def test_large_model_pt(self): import datasets model = "superb/wav2vec2-base-superb-ks" audio_classifier = pipeline("audio-classification", model=model) dataset = datasets.load_dataset("anton-l/superb_dummy", "ks", split="test") audio = np.array(dataset[3]["speech"], dtype=np.float32) output = audio_classifier(audio, top_k=4) self.assertEqual( 
nested_simplify(output, decimals=3), [ {"score": 0.981, "label": "go"}, {"score": 0.007, "label": "up"}, {"score": 0.006, "label": "_unknown_"}, {"score": 0.001, "label": "down"}, ], ) @require_tf @unittest.skip("Audio classification is not implemented for TF") def test_small_model_tf(self): pass
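As a usage reference, the behaviour asserted above corresponds to this minimal sketch (the tiny checkpoint is the one from test_small_model_pt; the silent waveform is purely illustrative):

import numpy as np
from transformers import pipeline

# Minimal audio-classification usage mirroring test_small_model_pt above.
classifier = pipeline("audio-classification", model="anton-l/wav2vec2-random-tiny-classifier")

# One second of silence at 8 kHz, passed as a raw waveform.
audio = np.zeros((8000,), dtype=np.float32)
for prediction in classifier(audio, top_k=4):
    # Each prediction is a dict with "score" and "label" keys.
    print(prediction["label"], round(prediction["score"], 4))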
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_translation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import pytest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MBart50TokenizerFast, MBartConfig, MBartForConditionalGeneration, TranslationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow from .test_pipelines_common import ANY @is_pipeline_test class TranslationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, MBartConfig): src_lang, tgt_lang = list(tokenizer.lang_code_to_id.keys())[:2] translator = TranslationPipeline(model=model, tokenizer=tokenizer, src_lang=src_lang, tgt_lang=tgt_lang) else: translator = TranslationPipeline(model=model, tokenizer=tokenizer) return translator, ["Some string", "Some other text"] def run_pipeline_test(self, translator, _): outputs = translator("Some string") self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}]) outputs = translator(["Some string", "other string"]) self.assertEqual(outputs, [{"translation_text": ANY(str)}, {"translation_text": ANY(str)}]) @require_torch def test_small_model_pt(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_tf def test_small_model_tf(self): translator = pipeline("translation_en_to_ro", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide Beide" " Beide Beide" ) } ], ) @require_torch def test_en_to_de_pt(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="pt") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) @require_tf def test_en_to_de_tf(self): translator = pipeline("translation_en_to_de", model="patrickvonplaten/t5-tiny-random", framework="tf") outputs = translator("This is a test string", max_length=20) self.assertEqual( outputs, [ { "translation_text": ( "monoton monoton monoton monoton monoton monoton monoton monoton monoton monoton urine urine" " urine urine urine urine urine urine urine" ) } ], ) class 
TranslationNewFormatPipelineTests(unittest.TestCase): @require_torch @slow def test_default_translations(self): # We don't provide a default for this pair with self.assertRaises(ValueError): pipeline(task="translation_cn_to_ar") # but we do for this one translator = pipeline(task="translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch @slow def test_multilingual_translation(self): model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt") translator = pipeline(task="translation", model=model, tokenizer=tokenizer) # Missing src_lang, tgt_lang with self.assertRaises(ValueError): translator("This is a test") outputs = translator("This is a test", src_lang="en_XX", tgt_lang="ar_AR") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) outputs = translator("This is a test", src_lang="en_XX", tgt_lang="hi_IN") self.assertEqual(outputs, [{"translation_text": "यह एक परीक्षण है"}]) # src_lang, tgt_lang can be defined at pipeline call time translator = pipeline(task="translation", model=model, tokenizer=tokenizer, src_lang="en_XX", tgt_lang="ar_AR") outputs = translator("This is a test") self.assertEqual(outputs, [{"translation_text": "هذا إختبار"}]) @require_torch def test_translation_on_odd_language(self): model = "patrickvonplaten/t5-tiny-random" translator = pipeline(task="translation_cn_to_ar", model=model) self.assertEqual(translator._preprocess_params["src_lang"], "cn") self.assertEqual(translator._preprocess_params["tgt_lang"], "ar") @require_torch def test_translation_default_language_selection(self): model = "patrickvonplaten/t5-tiny-random" with pytest.warns(UserWarning, match=r".*translation_en_to_de.*"): translator = pipeline(task="translation", model=model) self.assertEqual(translator.task, "translation_en_to_de") self.assertEqual(translator._preprocess_params["src_lang"], "en") self.assertEqual(translator._preprocess_params["tgt_lang"], "de") @require_torch def test_translation_with_no_language_no_model_fails(self): with self.assertRaises(ValueError): pipeline(task="translation")
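The multilingual path exercised above maps to the following minimal sketch (the mBART-50 checkpoint is large, which is why the corresponding test is marked @slow):

from transformers import pipeline

# Minimal multilingual translation usage mirroring test_multilingual_translation above.
translator = pipeline(
    "translation",
    model="facebook/mbart-large-50-many-to-many-mmt",
    src_lang="en_XX",
    tgt_lang="ar_AR",
)
# The pipeline returns a list of {"translation_text": ...} dicts.
print(translator("This is a test")[0]["translation_text"])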
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_text2text_generation.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, Text2TextGenerationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch from transformers.utils import is_torch_available from .test_pipelines_common import ANY if is_torch_available(): import torch @is_pipeline_test class Text2TextGenerationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer) return generator, ["Something to write", "Something else"] def run_pipeline_test(self, generator, _): outputs = generator("Something there") self.assertEqual(outputs, [{"generated_text": ANY(str)}]) # These are encoder decoder, they don't just append to incoming string self.assertFalse(outputs[0]["generated_text"].startswith("Something there")) outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) outputs = generator( ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True ) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}], ], ) with self.assertRaises(ValueError): generator(4) @require_torch def test_small_model_pt(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}]) num_return_sequences = 3 outputs = generator( "Something there", num_return_sequences=num_return_sequences, num_beams=num_return_sequences, ) target_outputs = [ {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"}, {"generated_text": ""}, ] self.assertEqual(outputs, target_outputs) outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True) self.assertEqual( outputs, [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], ) generator.tokenizer.pad_token_id = generator.model.config.eos_token_id generator.tokenizer.pad_token = "<pad>" outputs = generator( ["This is a test", "This is a second test"], do_sample=True, num_return_sequences=2, batch_size=2, return_tensors=True, ) self.assertEqual( outputs, [ [ {"generated_token_ids": ANY(torch.Tensor)}, {"generated_token_ids": ANY(torch.Tensor)}, ], [ {"generated_token_ids": ANY(torch.Tensor)}, 
{"generated_token_ids": ANY(torch.Tensor)}, ], ], ) @require_tf def test_small_model_tf(self): generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf") # do_sample=False necessary for reproducibility outputs = generator("Something there", do_sample=False) self.assertEqual(outputs, [{"generated_text": ""}])
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_fill_mask.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class FillMaskPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_MASKED_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) @require_tf def test_small_model_tf(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"}, {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped", }, { "sequence": "The largest city in France is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"}, ], ) @require_torch def test_small_model_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt") outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"}, {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs, decimals=6), [ { "sequence": "The largest city in France is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul", }, {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"}, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs, decimals=6), [ {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 
3499, "token_str": " Patrick"}, {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"}, {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"}, ], ) outputs = unmasker("My name is <mask> <mask>", top_k=2) self.assertEqual( nested_simplify(outputs, decimals=6), [ [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is Maul<mask></s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"}, ], [ { "score": 2.2e-05, "token": 35676, "token_str": " Maul", "sequence": "<s>My name is<mask> Maul</s>", }, {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"}, ], ], ) @require_torch_accelerator def test_fp16_casting(self): pipe = pipeline( "fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=torch_device, framework="pt", ) # convert model to fp16 pipe.model.half() response = pipe("Paris is the [MASK] of France.") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(response, list) @slow @require_torch def test_large_model_pt(self): unmasker = pipeline(task="fill-mask", model="distilbert/distilroberta-base", top_k=2, framework="pt") self.run_large_test(unmasker) @slow @require_tf def test_large_model_tf(self): unmasker = pipeline(task="fill-mask", model="distilbert/distilroberta-base", top_k=2, framework="tf") self.run_large_test(unmasker) def run_large_test(self, unmasker): outputs = unmasker("My name is <mask>") self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"}, {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"}, ], ) outputs = unmasker("The largest city in France is <mask>") self.assertEqual( nested_simplify(outputs), [ { "sequence": "The largest city in France is Paris", "score": 0.251, "token": 2201, "token_str": " Paris", }, { "sequence": "The largest city in France is Lyon", "score": 0.214, "token": 12790, "token_str": " Lyon", }, ], ) outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3) self.assertEqual( nested_simplify(outputs), [ {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"}, {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"}, {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"}, ], ) dummy_str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit," * 100 outputs = unmasker( "My name is <mask>" + dummy_str, tokenizer_kwargs={"truncation": True}, ) simplified = nested_simplify(outputs, decimals=4) self.assertEqual( [{"sequence": x["sequence"][:100]} for x in simplified], [ {"sequence": f"My name is,{dummy_str}"[:100]}, {"sequence": f"My name is:,{dummy_str}"[:100]}, ], ) self.assertEqual( [{k: x[k] for k in x if k != "sequence"} for x in simplified], [ {"score": 0.2819, "token": 6, "token_str": ","}, {"score": 0.0954, "token": 46686, "token_str": ":,"}, ], ) @require_torch def test_model_no_pad_pt(self): unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) @require_tf def test_model_no_pad_tf(self): unmasker = pipeline(task="fill-mask", 
model="sshleifer/tiny-distilroberta-base", framework="tf") unmasker.tokenizer.pad_token_id = None unmasker.tokenizer.pad_token = None self.run_pipeline_test(unmasker, []) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)") fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) examples = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def run_pipeline_test(self, fill_masker, examples): tokenizer = fill_masker.tokenizer model = fill_masker.model outputs = fill_masker( f"This is a {tokenizer.mask_token}", ) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], ) with self.assertRaises(ValueError): fill_masker([None]) # No mask_token is not supported with self.assertRaises(PipelineException): fill_masker("This is") self.run_test_top_k(model, tokenizer) self.run_test_targets(model, tokenizer) self.run_test_top_k_targets(model, tokenizer) self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer) self.fill_mask_with_multiple_masks(model, tokenizer) def run_test_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() targets = sorted(vocab.keys())[:2] # Pipeline argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": 
ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Call argument fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) target_ids = {vocab[el] for el in targets} self.assertEqual({el["token"] for el in outputs}, target_ids) processed_targets = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets)) # Score equivalence outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets) tokens = [top_mask["token_str"] for top_mask in outputs] scores = [top_mask["score"] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(tokens) == set(targets): unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens) target_scores = [top_mask["score"] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(scores), nested_simplify(target_scores)) # Raises with invalid with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""]) with self.assertRaises(ValueError): outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="") def run_test_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2) outputs = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( outputs, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2) self.assertEqual( outputs2, [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ) self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def run_test_top_k_targets(self, model, tokenizer): vocab = tokenizer.get_vocab() fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) # top_k=2, ntargets=3 targets = sorted(vocab.keys())[:3] outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets) # If we use the most probably targets, and filter differently, we should still # have the same results targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(targets2).issubset(targets): outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2) # They should yield exactly the same result self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2)) def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) vocab = tokenizer.get_vocab() # String duplicates + id duplicates targets = sorted(vocab.keys())[:3] targets = [targets[0], targets[1], targets[0], targets[2], targets[1]] outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(outputs), 3) def fill_mask_with_multiple_masks(self, model, tokenizer): fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer) outputs = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2 ) self.assertEqual( outputs, [ [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], [ {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)}, ], ], )
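The output shape these assertions check corresponds to this minimal sketch (using the full distilroberta checkpoint from run_large_test, so it downloads a real model):

from transformers import pipeline

# Minimal fill-mask usage mirroring run_large_test above.
unmasker = pipeline("fill-mask", model="distilbert/distilroberta-base", top_k=2)

for prediction in unmasker("The largest city in France is <mask>"):
    # Each prediction carries "sequence", "score", "token" and "token_str" keys.
    print(prediction["token_str"], round(prediction["score"], 3))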
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_conversational.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoTokenizer, BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer, Conversation, ConversationalPipeline, TFAutoModelForCausalLM, pipeline, ) from transformers.testing_utils import ( backend_empty_cache, is_pipeline_test, is_torch_available, require_tf, require_torch, slow, torch_device, ) from .test_pipelines_common import ANY @is_pipeline_test class ConversationalPipelineTests(unittest.TestCase): def tearDown(self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): backend_empty_cache(torch_device) model_mapping = dict( list(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(MODEL_FOR_CAUSAL_LM_MAPPING.items()) if MODEL_FOR_CAUSAL_LM_MAPPING else [] ) tf_model_mapping = dict( list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING else [] + list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.items()) if TF_MODEL_FOR_CAUSAL_LM_MAPPING else [] ) def get_test_pipeline(self, model, tokenizer, processor): conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) return conversation_agent, [Conversation("Hi there!")] def run_pipeline_test(self, conversation_agent, _): # Simple outputs = conversation_agent(Conversation("Hi there!"), max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Single list outputs = conversation_agent([Conversation("Hi there!")], max_new_tokens=5) self.assertEqual( outputs, Conversation([{"role": "user", "content": "Hi there!"}, {"role": "assistant", "content": ANY(str)}]), ) # Batch conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") self.assertEqual(len(conversation_1), 1) self.assertEqual(len(conversation_2), 1) outputs = conversation_agent([conversation_1, conversation_2], max_new_tokens=5) self.assertEqual(outputs, [conversation_1, conversation_2]) self.assertEqual( outputs, [ Conversation( [ {"role": "user", "content": "Going to the movies tonight - any suggestions?"}, {"role": "assistant", "content": ANY(str)}, ], ), Conversation( [ {"role": "user", "content": "What's the last book you have read?"}, {"role": "assistant", "content": ANY(str)}, ] ), ], ) # One conversation with history conversation_2.add_message({"role": "user", "content": "Why do you recommend it?"}) outputs = conversation_agent(conversation_2, max_new_tokens=5) self.assertEqual(outputs, conversation_2) self.assertEqual( outputs, Conversation( [ {"role": "user", "content": "What's the last book 
you have read?"}, {"role": "assistant", "content": ANY(str)}, {"role": "user", "content": "Why do you recommend it?"}, {"role": "assistant", "content": ANY(str)}, ] ), ) @require_torch @slow def test_integration_torch_conversation(self): # When conversation_agent = pipeline(task="conversational", device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") conversation_2 = Conversation("What's the last book you have read?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result[0].generated_responses[0], "The Big Lebowski") self.assertEqual(result[1].past_user_inputs[0], "What's the last book you have read?") self.assertEqual(result[1].generated_responses[0], "The Last Question") # When conversation_2.add_user_input("Why do you recommend it?") result = conversation_agent(conversation_2, do_sample=False, max_length=1000) # Then self.assertEqual(result, conversation_2) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Why do you recommend it?") self.assertEqual(result.generated_responses[1], "It's a good book.") @require_torch @slow def test_integration_torch_conversation_truncated_history(self): # When conversation_agent = pipeline(task="conversational", min_length_for_response=24, device=torch_device) conversation_1 = Conversation("Going to the movies tonight - any suggestions?") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) # When result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 1) self.assertEqual(len(result.generated_responses), 1) self.assertEqual(result.past_user_inputs[0], "Going to the movies tonight - any suggestions?") self.assertEqual(result.generated_responses[0], "The Big Lebowski") # When conversation_1.add_user_input("Is it an action movie?") result = conversation_agent(conversation_1, do_sample=False, max_length=36) # Then self.assertEqual(result, conversation_1) self.assertEqual(len(result.past_user_inputs), 2) self.assertEqual(len(result.generated_responses), 2) self.assertEqual(result.past_user_inputs[1], "Is it an action movie?") self.assertEqual(result.generated_responses[1], "It's a comedy.") @require_torch def test_small_model_pt(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_tf def test_small_model_tf(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = TFAutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent 
= ConversationalPipeline(model=model, tokenizer=tokenizer) conversation = Conversation("hello") output = conversation_agent(conversation) self.assertEqual(output, Conversation(past_user_inputs=["hello"], generated_responses=["Hi"])) @require_torch @slow def test_integration_torch_conversation_dialogpt_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small") model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[31373, 50256]]) conversation_2 = Conversation("how are you ?", past_user_inputs=["hello"], generated_responses=["Hi there!"]) inputs = conversation_agent.preprocess(conversation_2) self.assertEqual( inputs["input_ids"].tolist(), [[31373, 50256, 17250, 612, 0, 50256, 4919, 389, 345, 5633, 50256]] ) @unittest.skip("Model is curently gated") @require_torch @slow def test_integration_torch_conversation_llama2_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-chat-hf", use_default_system_prompt=True) conversation = Conversation( "What is so great about #1?", past_user_inputs=["I am going to Paris, what should I see?"], generated_responses=[ """\ Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris: 1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city. 2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa. 3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows. These are just a few of the many attractions that Paris has to offer. 
With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.""" ], ) inputs = tokenizer._build_conversation_input_ids(conversation) EXPECTED_INPUTS_IDS = [ 1, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 29871, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29902, 626, 2675, 304, 3681, 29892, 825, 881, 306, 1074, 29973, 518, 29914, 25580, 29962, 3681, 29892, 278, 7483, 310, 3444, 29892, 338, 2998, 363, 967, 380, 27389, 11258, 29892, 1616, 19133, 29879, 29892, 15839, 2982, 22848, 29892, 322, 6017, 7716, 25005, 29889, 2266, 526, 777, 310, 278, 2246, 19650, 1953, 304, 1074, 297, 3681, 29901, 13, 13, 29896, 29889, 450, 382, 2593, 295, 23615, 29901, 450, 9849, 293, 382, 2593, 295, 23615, 338, 697, 310, 278, 1556, 5936, 13902, 2982, 22848, 297, 278, 3186, 322, 16688, 2078, 271, 400, 5086, 8386, 310, 278, 4272, 29889, 13, 29906, 29889, 450, 4562, 12675, 6838, 29901, 450, 4562, 12675, 338, 697, 310, 278, 3186, 29915, 29879, 10150, 322, 1556, 13834, 19133, 29879, 29892, 27261, 385, 21210, 573, 4333, 310, 1616, 322, 24238, 29879, 29892, 3704, 278, 2598, 29874, 29420, 29889, 13, 29941, 29889, 24337, 29899, 29928, 420, 315, 21471, 29901, 910, 9560, 274, 21471, 338, 697, 310, 278, 1556, 13834, 2982, 22848, 297, 3681, 322, 338, 2998, 363, 967, 22883, 293, 11258, 322, 380, 27389, 380, 7114, 12917, 5417, 29889, 13, 13, 1349, 968, 526, 925, 263, 2846, 310, 278, 1784, 19650, 1953, 393, 3681, 756, 304, 5957, 29889, 2973, 577, 1568, 304, 1074, 322, 437, 29892, 372, 29915, 29879, 694, 4997, 393, 3681, 338, 697, 310, 278, 1556, 5972, 6282, 391, 15422, 800, 297, 278, 3186, 29889, 29871, 2, 1, 518, 25580, 29962, 1724, 338, 577, 2107, 1048, 396, 29896, 29973, 518, 29914, 25580, 29962] # fmt: skip self.assertEqual(inputs, EXPECTED_INPUTS_IDS) model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-2-7b-chat-hf") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) EXPECTED_TEXT = "what topic you want to focus on and create content around it. This will help you stand out from other creators and attract a specific audience.\n\nStep 2: Set Up Your Channel\nCreate your YouTube account and customize your channel with your branding and logo. Make sure your channel name and profile picture are consistent with your niche.\n\nStep 3: Plan Your Content\nDevelop a content strategy that includes the type of content you want to create, how often you will post, and when you will post. Consider creating a content calendar to help you stay organized.\n\nStep 4: Invest in Quality Equipment\nInvest in good quality camera and microphone equipment to ensure your videos look and sound professional. 
You don't need to break the bank, but investing in good equipment will make a big difference in the quality of your videos.\n\nStep 5: Optimize Your Videos for Search\nUse keywords in your video titles, descriptions, and tags to help people find your videos when they search for topics related to your niche" conversation = Conversation( "<<SYS>>\n Only answer with emojis, and charades\n<</SYS>>\n\nHow can I build a house in 10 steps?" ) result = conversation_agent(conversation) self.assertEqual(result.generated_responses[-1], EXPECTED_TEXT) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M_input_ids(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) # test1 conversation_1 = Conversation("hello") inputs = conversation_agent.preprocess(conversation_1) self.assertEqual(inputs["input_ids"].tolist(), [[1710, 86, 2]]) # test2 conversation_1 = Conversation( "I like lasagne.", past_user_inputs=["hello"], generated_responses=[ " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie." ], ) inputs = conversation_agent.preprocess(conversation_1) self.assertEqual( inputs["input_ids"].tolist(), [ # This should be compared with the same conversation on ParlAI `safe_interactive` demo. [ 1710, # hello 86, 228, # Double space 228, 946, 304, 398, 6881, 558, 964, 38, 452, 315, 265, 6252, 452, 322, 968, 6884, 3146, 278, 306, 265, 617, 87, 388, 75, 341, 286, 521, 21, 228, # Double space 228, 281, # I like lasagne. 398, 6881, 558, 964, 21, 2, # EOS ], ], ) @require_torch @slow def test_integration_torch_conversation_blenderbot_400M(self): tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot-400M-distill") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer) conversation_1 = Conversation("hello") result = conversation_agent( conversation_1, ) self.assertEqual( result.generated_responses[0], # ParlAI implementation output, we have a different one, but it's our # second best, you can check by using num_return_sequences=10 # " Hello! How are you? I'm just getting ready to go to work, how about you?", " Hello! How are you doing today? I just got back from a walk with my dog.", ) conversation_1 = Conversation("Lasagne hello") result = conversation_agent(conversation_1, encoder_no_repeat_ngram_size=3) self.assertEqual( result.generated_responses[0], " Do you like lasagne? It is a traditional Italian dish consisting of a shepherd's pie.", ) conversation_1 = Conversation( "Lasagne hello Lasagne is my favorite Italian dish. Do you like lasagne? I like lasagne." ) result = conversation_agent( conversation_1, encoder_no_repeat_ngram_size=3, ) self.assertEqual( result.generated_responses[0], " Me too. 
I like how it can be topped with vegetables, meats, and condiments.", ) @require_torch @slow def test_integration_torch_conversation_encoder_decoder(self): # When tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M") model = AutoModelForSeq2SeqLM.from_pretrained("facebook/blenderbot_small-90M") conversation_agent = ConversationalPipeline(model=model, tokenizer=tokenizer, device=torch_device) conversation_1 = Conversation("My name is Sarah and I live in London") conversation_2 = Conversation("Going to the movies tonight, What movie would you recommend? ") # Then self.assertEqual(len(conversation_1.past_user_inputs), 0) self.assertEqual(len(conversation_2.past_user_inputs), 0) # When result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 1) self.assertEqual(len(result[1].past_user_inputs), 1) self.assertEqual(len(result[0].generated_responses), 1) self.assertEqual(len(result[1].generated_responses), 1) self.assertEqual(result[0].past_user_inputs[0], "My name is Sarah and I live in London") self.assertEqual( result[0].generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result[1].past_user_inputs[0], "Going to the movies tonight, What movie would you recommend? " ) self.assertEqual( result[1].generated_responses[0], "i don't know... i'm not really sure. what movie are you going to see?" ) # When conversation_1.add_user_input("Not yet, what about you?") conversation_2.add_user_input("What's your name?") result = conversation_agent([conversation_1, conversation_2], do_sample=False, max_length=1000) # Then self.assertEqual(result, [conversation_1, conversation_2]) self.assertEqual(len(result[0].past_user_inputs), 2) self.assertEqual(len(result[1].past_user_inputs), 2) self.assertEqual(len(result[0].generated_responses), 2) self.assertEqual(len(result[1].generated_responses), 2) self.assertEqual(result[0].past_user_inputs[1], "Not yet, what about you?") self.assertEqual(result[0].generated_responses[1], "i don't have any plans yet. i'm not sure what to do yet.") self.assertEqual(result[1].past_user_inputs[1], "What's your name?") self.assertEqual(result[1].generated_responses[1], "i don't have a name, but i'm going to see a horror movie.") @require_torch @slow def test_from_pipeline_conversation(self): model_id = "facebook/blenderbot_small-90M" # from model id conversation_agent_from_model_id = pipeline("conversational", model=model_id, tokenizer=model_id) # from model object model = BlenderbotSmallForConditionalGeneration.from_pretrained(model_id) tokenizer = BlenderbotSmallTokenizer.from_pretrained(model_id) conversation_agent_from_model = pipeline("conversational", model=model, tokenizer=tokenizer) conversation = Conversation("My name is Sarah and I live in London") conversation_copy = Conversation("My name is Sarah and I live in London") result_model_id = conversation_agent_from_model_id([conversation]) result_model = conversation_agent_from_model([conversation_copy]) # check for equality self.assertEqual( result_model_id.generated_responses[0], "hi sarah, i live in london as well. do you have any plans for the weekend?", ) self.assertEqual( result_model_id.generated_responses[0], result_model.generated_responses[0], )
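For orientation, the small-model path above reduces to this minimal sketch. Note that the Conversation-based API shown here is the one these tests target; newer transformers releases deprecate it in favour of passing chat messages to the text-generation pipeline:

from transformers import Conversation, pipeline

# Minimal conversational usage mirroring test_small_model_pt above.
chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("hello")
conversation = chatbot(conversation)
# The pipeline appends the model's reply to the conversation in place.
print(conversation.generated_responses[-1])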
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_timm @require_torch class ObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = ObjectDetectionPipeline(model=model, image_processor=processor) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def run_pipeline_test(self, object_detector, examples): outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0) self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") batch = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] batch_outputs = object_detector(batch, threshold=0.0) self.assertEqual(len(batch), len(batch_outputs)) for outputs in batch_outputs: self.assertGreater(len(outputs), 0) for detected_object in outputs: self.assertEqual( detected_object, { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, }, ) @require_tf @unittest.skip("Object detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], threshold=0.0, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}}, ], ], ) @require_torch @slow def test_large_model_pt(self): model_id = "facebook/detr-resnet-50" model = AutoModelForObjectDetection.from_pretrained(model_id) feature_extractor = AutoFeatureExtractor.from_pretrained(model_id) object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_integration_torch_object_detection(self): model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) outputs = object_detector( [ 
"http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ], ) @require_torch @slow def test_threshold(self): threshold = 0.9985 model_id = "facebook/detr-resnet-50" object_detector = pipeline("object-detection", model=model_id) outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}}, ], ) @require_torch @require_pytesseract @slow def test_layoutlm(self): model_id = "Narsil/layoutlmv3-finetuned-funsd" threshold = 0.9993 object_detector = pipeline("object-detection", model=model_id, threshold=threshold) outputs = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}}, ], )
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_text_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TextClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow, torch_device from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TextClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } @require_torch def test_small_model_pt(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", top_k=2) self.assertEqual( nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] ) outputs = text_classifier(["This is great !", "This is bad"], top_k=2) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier("This is great !", top_k=1) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) # Legacy behavior outputs = text_classifier("This is great !", return_all_scores=False) self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) outputs = text_classifier("This is great !", return_all_scores=True) self.assertEqual( nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True) self.assertEqual( nested_simplify(outputs), [ [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}], ], ) outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False) self.assertEqual( nested_simplify(outputs), [ {"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_0", "score": 0.504}, ], ) @require_torch def test_accepts_torch_device(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt", device=torch_device, ) outputs = 
text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @require_tf def test_small_model_tf(self): text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}]) @slow @require_torch def test_pt_bert(self): text_classifier = pipeline("text-classification") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) @slow @require_tf def test_tf_bert(self): text_classifier = pipeline("text-classification", framework="tf") outputs = text_classifier("This is great !") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}]) outputs = text_classifier("This is bad !") self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}]) outputs = text_classifier("Birds are a type of animal") self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}]) def get_test_pipeline(self, model, tokenizer, processor): text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) return text_classifier, ["HuggingFace is in", "This is another test"] def run_pipeline_test(self, text_classifier, _): model = text_classifier.model # Small inputs because BartTokenizer tiny has maximum position embeddings = 22 valid_inputs = "HuggingFace is in" outputs = text_classifier(valid_inputs) self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}]) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) valid_inputs = ["HuggingFace is in ", "Paris is in France"] outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values()) self.assertTrue(outputs[1]["label"] in model.config.id2label.values()) # Forcing to get all results with `top_k=None` # This is NOT the legacy format outputs = text_classifier(valid_inputs, top_k=None) N = len(model.config.id2label.values()) self.assertEqual( nested_simplify(outputs), [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N], ) valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"} outputs = text_classifier(valid_inputs) self.assertEqual( nested_simplify(outputs), {"label": ANY(str), "score": ANY(float)}, ) self.assertTrue(outputs["label"] in model.config.id2label.values()) # This might be used a text pair, but tokenizer + pipe interaction # makes it hard to understand that it's not using the pair properly # https://github.com/huggingface/transformers/issues/17305 # We disabled this usage instead as it was outputting wrong outputs. 
invalid_input = [["HuggingFace is in ", "Paris is in France"]] with self.assertRaises(ValueError): text_classifier(invalid_input) # This used to be valid for doing text pairs # We're keeping it working because of backward compatibility outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]]) self.assertEqual( nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}], ) self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_zero_shot.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class ZeroShotClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): classifier = ZeroShotClassificationPipeline( model=model, tokenizer=tokenizer, candidate_labels=["polics", "health"] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def run_pipeline_test(self, classifier, _): outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics") self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # No kwarg outputs = classifier("Who are you voting for in 2020?", ["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics"]) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) outputs = classifier("Who are you voting for in 2020?", candidate_labels="politics, public health") self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier("Who are you voting for in 2020?", candidate_labels=["politics", "public health"]) self.assertEqual( outputs, {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["scores"])), 1.0) outputs = classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="This text is about {}" ) self.assertEqual(outputs, {"sequence": ANY(str), "labels": [ANY(str)], "scores": [ANY(float)]}) # https://github.com/huggingface/transformers/issues/13846 outputs = classifier(["I am happy"], ["positive", "negative"]) self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(1) ], ) outputs = classifier(["I am happy", "I am sad"], ["positive", "negative"]) 
self.assertEqual( outputs, [ {"sequence": ANY(str), "labels": [ANY(str), ANY(str)], "scores": [ANY(float), ANY(float)]} for i in range(2) ], ) with self.assertRaises(ValueError): classifier("", candidate_labels="politics") with self.assertRaises(TypeError): classifier(None, candidate_labels="politics") with self.assertRaises(ValueError): classifier("Who are you voting for in 2020?", candidate_labels="") with self.assertRaises(TypeError): classifier("Who are you voting for in 2020?", candidate_labels=None) with self.assertRaises(ValueError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template="Not formatting template", ) with self.assertRaises(AttributeError): classifier( "Who are you voting for in 2020?", candidate_labels="politics", hypothesis_template=None, ) self.run_entailment_id(classifier) def run_entailment_id(self, zero_shot_classifier: Pipeline): config = zero_shot_classifier.model.config original_label2id = config.label2id original_entailment = zero_shot_classifier.entailment_id config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2} self.assertEqual(zero_shot_classifier.entailment_id, -1) config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1} self.assertEqual(zero_shot_classifier.entailment_id, 0) config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0} self.assertEqual(zero_shot_classifier.entailment_id, 2) zero_shot_classifier.model.config.label2id = original_label2id self.assertEqual(original_entailment, zero_shot_classifier.entailment_id) @require_torch def test_truncation(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( "Who are you voting for in 2020?" 
* 100, candidate_labels=["politics", "public health", "science"] ) @require_torch def test_small_model_pt(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="pt", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @require_tf def test_small_model_tf(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf", ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["science", "public health", "politics"], "scores": [0.333, 0.333, 0.333], }, ) @slow @require_torch def test_large_model_pt(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="FacebookAI/roberta-large-mnli", framework="pt" ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. 
Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, ) @slow @require_tf def test_large_model_tf(self): zero_shot_classifier = pipeline( "zero-shot-classification", model="FacebookAI/roberta-large-mnli", framework="tf" ) outputs = zero_shot_classifier( "Who are you voting for in 2020?", candidate_labels=["politics", "public health", "science"] ) self.assertEqual( nested_simplify(outputs), { "sequence": "Who are you voting for in 2020?", "labels": ["politics", "public health", "science"], "scores": [0.976, 0.015, 0.009], }, ) outputs = zero_shot_classifier( "The dominant sequence transduction models are based on complex recurrent or convolutional neural networks" " in an encoder-decoder configuration. The best performing models also connect the encoder and decoder" " through an attention mechanism. We propose a new simple network architecture, the Transformer, based" " solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two" " machine translation tasks show these models to be superior in quality while being more parallelizable" " and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014" " English-to-German translation task, improving over the existing best results, including ensembles by" " over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new" " single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small" " fraction of the training costs of the best models from the literature. We show that the Transformer" " generalizes well to other tasks by applying it successfully to English constituency parsing both with" " large and limited training data.", candidate_labels=["machine learning", "statistics", "translation", "vision"], multi_label=True, ) self.assertEqual( nested_simplify(outputs), { "sequence": ( "The dominant sequence transduction models are based on complex recurrent or convolutional neural" " networks in an encoder-decoder configuration. The best performing models also connect the" " encoder and decoder through an attention mechanism. We propose a new simple network" " architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence" " and convolutions entirely. Experiments on two machine translation tasks show these models to be" " superior in quality while being more parallelizable and requiring significantly less time to" " train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task," " improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014" " English-to-French translation task, our model establishes a new single-model state-of-the-art" " BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training" " costs of the best models from the literature. 
We show that the Transformer generalizes well to" " other tasks by applying it successfully to English constituency parsing both with large and" " limited training data." ), "labels": ["translation", "machine learning", "vision", "statistics"], "scores": [0.817, 0.713, 0.018, 0.018], }, )
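As a companion to the zero-shot tests above, a short sketch of the same call pattern with the MNLI checkpoint used in the slow tests; the hypothesis_template string here is an illustrative assumption (any template containing "{}" works), and the scores will only approximately match the test values.

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="FacebookAI/roberta-large-mnli")

result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",  # custom template (assumption); must contain "{}"
)
# Labels come back sorted by descending score; with multi_label=False they sum to ~1.
print(result["labels"])
print(result["scores"])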
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_summarization.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, SummarizationPipeline, TFPreTrainedModel, pipeline, ) from transformers.testing_utils import is_pipeline_test, require_tf, require_torch, slow, torch_device from transformers.tokenization_utils import TruncationStrategy from .test_pipelines_common import ANY @is_pipeline_test class SummarizationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING def get_test_pipeline(self, model, tokenizer, processor): summarizer = SummarizationPipeline(model=model, tokenizer=tokenizer) return summarizer, ["(CNN)The Palestinian Authority officially became", "Some other text"] def run_pipeline_test(self, summarizer, _): model = summarizer.model outputs = summarizer("(CNN)The Palestinian Authority officially became") self.assertEqual(outputs, [{"summary_text": ANY(str)}]) outputs = summarizer( "(CNN)The Palestinian Authority officially became ", num_beams=2, min_length=2, max_length=5, ) self.assertEqual(outputs, [{"summary_text": ANY(str)}]) # Some models (Switch Transformers, LED, T5, LongT5, etc) can handle long sequences. model_can_handle_longer_seq = [ "SwitchTransformersConfig", "T5Config", "LongT5Config", "LEDConfig", "PegasusXConfig", "FSMTConfig", "M2M100Config", "ProphetNetConfig", # positional embeddings up to a fixed maximum size (otherwise clamping the values) ] if model.config.__class__.__name__ not in model_can_handle_longer_seq: # Too long and exception is expected. # For TF models, if the weights are initialized in GPU context, we won't get expected index error from # the embedding layer. 
if not ( isinstance(model, TFPreTrainedModel) and len(summarizer.model.trainable_weights) > 0 and "GPU" in summarizer.model.trainable_weights[0].device ): with self.assertRaises(Exception): outputs = summarizer("This " * 1000) outputs = summarizer("This " * 1000, truncation=TruncationStrategy.ONLY_FIRST) @require_torch def test_small_model_pt(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_tf def test_small_model_tf(self): summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="tf") outputs = summarizer("This is a small test") self.assertEqual( outputs, [ { "summary_text": "เข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไปเข้าไป" } ], ) @require_torch @slow def test_integration_torch_summarization(self): summarizer = pipeline(task="summarization", device=torch_device) cnn_article = ( " (CNN)The Palestinian Authority officially became the 123rd member of the International Criminal Court on" " Wednesday, a step that gives the court jurisdiction over alleged crimes in Palestinian territories. The" " formal accession was marked with a ceremony at The Hague, in the Netherlands, where the court is based." " The Palestinians signed the ICC's founding Rome Statute in January, when they also accepted its" ' jurisdiction over alleged crimes committed "in the occupied Palestinian territory, including East' ' Jerusalem, since June 13, 2014." Later that month, the ICC opened a preliminary examination into the' " situation in Palestinian territories, paving the way for possible war crimes investigations against" " Israelis. As members of the court, Palestinians may be subject to counter-charges as well. Israel and" " the United States, neither of which is an ICC member, opposed the Palestinians' efforts to join the" " body. But Palestinian Foreign Minister Riad al-Malki, speaking at Wednesday's ceremony, said it was a" ' move toward greater justice. "As Palestine formally becomes a State Party to the Rome Statute today, the' ' world is also a step closer to ending a long era of impunity and injustice," he said, according to an' ' ICC news release. "Indeed, today brings us closer to our shared goals of justice and peace." Judge' " Kuniko Ozaki, a vice president of the ICC, said acceding to the treaty was just the first step for the" ' Palestinians. "As the Rome Statute today enters into force for the State of Palestine, Palestine' " acquires all the rights as well as responsibilities that come with being a State Party to the Statute." ' These are substantive commitments, which cannot be taken lightly," she said. Rights group Human Rights' ' Watch welcomed the development. "Governments seeking to penalize Palestine for joining the ICC should' " immediately end their pressure, and countries that support universal acceptance of the court's treaty" ' should speak out to welcome its membership," said Balkees Jarrah, international justice counsel for the' " group. \"What's objectionable is the attempts to undermine international justice, not Palestine's" ' decision to join a treaty to which over 100 countries around the world are members." 
In January, when' " the preliminary ICC examination was opened, Israeli Prime Minister Benjamin Netanyahu described it as an" ' outrage, saying the court was overstepping its boundaries. The United States also said it "strongly"' " disagreed with the court's decision. \"As we have said repeatedly, we do not believe that Palestine is a" ' state and therefore we do not believe that it is eligible to join the ICC," the State Department said in' ' a statement. It urged the warring sides to resolve their differences through direct negotiations. "We' ' will continue to oppose actions against Israel at the ICC as counterproductive to the cause of peace,"' " it said. But the ICC begs to differ with the definition of a state for its purposes and refers to the" ' territories as "Palestine." While a preliminary examination is not a formal investigation, it allows the' " court to review evidence and determine whether to investigate suspects on both sides. Prosecutor Fatou" ' Bensouda said her office would "conduct its analysis in full independence and impartiality." The war' " between Israel and Hamas militants in Gaza last summer left more than 2,000 people dead. The inquiry" " will include alleged war crimes committed since June. The International Criminal Court was set up in" " 2002 to prosecute genocide, crimes against humanity and war crimes. CNN's Vasco Cotovio, Kareem Khadder" " and Faith Karimi contributed to this report." ) expected_cnn_summary = ( " The Palestinian Authority becomes the 123rd member of the International Criminal Court . The move gives" " the court jurisdiction over alleged crimes in Palestinian territories . Israel and the United States" " opposed the Palestinians' efforts to join the court . Rights group Human Rights Watch welcomes the move," " says governments seeking to penalize Palestine should end pressure ." ) result = summarizer(cnn_article) self.assertEqual(result[0]["summary_text"], expected_cnn_summary)
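A minimal sketch of the summarization calls exercised above, using the tiny mBART checkpoint from the fast tests; it is randomly trained, so the generated text is gibberish and only the output structure and generation keyword arguments matter here.

from transformers import pipeline

summarizer = pipeline(task="summarization", model="sshleifer/tiny-mbart", framework="pt")

outputs = summarizer(
    "(CNN)The Palestinian Authority officially became",
    num_beams=2,   # generation kwargs are forwarded to generate()
    min_length=2,
    max_length=5,
)
print(outputs[0]["summary_text"])  # a list of {"summary_text": ...} dicts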
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_image_to_image.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_TO_IMAGE_MAPPING, AutoImageProcessor, AutoModelForImageToImage, ImageToImagePipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class ImageToImagePipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_TO_IMAGE_MAPPING examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] @require_torch @require_vision @slow def test_pipeline(self): model_id = "caidas/swin2SR-classical-sr-x2-64" upscaler = pipeline("image-to-image", model=model_id) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976)) @require_torch @require_vision @slow def test_pipeline_model_processor(self): model_id = "caidas/swin2SR-classical-sr-x2-64" model = AutoModelForImageToImage.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) upscaler = ImageToImagePipeline(model=model, image_processor=image_processor) upscaled_list = upscaler(self.examples) self.assertEqual(len(upscaled_list), len(self.examples)) for output in upscaled_list: self.assertIsInstance(output, Image.Image) self.assertEqual(upscaled_list[0].size, (1296, 976)) self.assertEqual(upscaled_list[1].size, (1296, 976))
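The image-to-image tests above reduce to the following sketch: the Swin2SR x2 checkpoint upscales each input and returns PIL images. Passing a list in (as the tests do) gives a list back; a size such as (1296, 976) comes from the 640x480 COCO sample plus model padding.

from transformers import pipeline

upscaler = pipeline("image-to-image", model="caidas/swin2SR-classical-sr-x2-64")

# List in, list of PIL.Image.Image out.
upscaled_list = upscaler(["http://images.cocodataset.org/val2017/000000039769.jpg"])
print(upscaled_list[0].size)  # (1296, 976) in the test above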
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_visual_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_accelerator, require_vision, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch @require_vision class VisualQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def get_test_pipeline(self, model, tokenizer, processor): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") examples = [ { "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "question": "How many cats are there?", }, { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "question": "How many cats are there?", }, ] return vqa_pipeline, examples def run_pipeline_test(self, vqa_pipeline, examples): outputs = vqa_pipeline(examples, top_k=1) self.assertEqual( outputs, [ [{"score": ANY(float), "answer": ANY(str)}], [{"score": ANY(float), "answer": ANY(str)}], ], ) @require_torch def test_small_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}] ) @require_torch @require_torch_accelerator def test_small_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration" ) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" 
outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": ANY(str)}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": ANY(str)}]] * 2) vqa_pipeline = pipeline( "visual-question-answering", model="hf-internal-testing/tiny-random-Blip2ForConditionalGeneration", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) self.assertEqual(vqa_pipeline.model.vision_model.dtype, torch.float16) outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": ANY(str)}]) @slow @require_torch def test_large_model_pt(self): vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "How many cats are there?" outputs = vqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] ) outputs = vqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2, ) @slow @require_torch @require_torch_accelerator def test_large_model_pt_blip2(self): vqa_pipeline = pipeline( "visual-question-answering", model="Salesforce/blip2-opt-2.7b", model_kwargs={"torch_dtype": torch.float16}, device=torch_device, ) self.assertEqual(vqa_pipeline.model.device, torch.device("{}:0".format(torch_device))) self.assertEqual(vqa_pipeline.model.language_model.dtype, torch.float16) image = "./tests/fixtures/tests_samples/COCO/000000039769.png" question = "Question: how many cats are there? Answer:" outputs = vqa_pipeline(image=image, question=question) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline({"image": image, "question": question}) self.assertEqual(outputs, [{"answer": "two"}]) outputs = vqa_pipeline([{"image": image, "question": question}, {"image": image, "question": question}]) self.assertEqual(outputs, [[{"answer": "two"}]] * 2) @require_tf @unittest.skip("Visual question answering not implemented in TF") def test_small_model_tf(self): pass
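To show the shape of the calls the visual-question-answering tests above make, here is a hedged sketch with the ViLT checkpoint from the slow test; the COCO URL stands in for the local fixture path used in the tests, which is an assumption on my part (any local path or URL the pipeline can load works the same way).

from transformers import pipeline

vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")

answers = vqa(
    image="http://images.cocodataset.org/val2017/000000039769.jpg",  # assumed stand-in for the local COCO fixture
    question="How many cats are there?",
    top_k=2,
)
print(answers)  # e.g. [{"score": 0.88, "answer": "2"}, {"score": 0.30, "answer": "1"}]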
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_text_to_audio.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING, AutoProcessor, TextToAudioPipeline, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.trainer_utils import set_seed from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class TextToAudioPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TEXT_TO_WAVEFORM_MAPPING # for now only test text_to_waveform and not text_to_spectrogram @slow @require_torch def test_small_musicgen_pt(self): music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": False, "max_new_tokens": 250, } outputs = music_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 32000}, outputs) # test two examples side-by-side outputs = music_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = music_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_medium_seamless_m4t_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/hf-seamless-m4t-medium", framework="pt") for forward_params in [{"tgt_lang": "eng"}, {"return_intermediate_token_ids": True, "tgt_lang": "eng"}]: outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual({"audio": ANY(np.ndarray), "sampling_rate": 16000}, outputs) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2 ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch def test_small_bark_pt(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt") forward_params = { # Using `do_sample=False` to force deterministic output "do_sample": False, "semantic_max_new_tokens": 100, } outputs = speech_generator("This is a test", forward_params=forward_params) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) # test two examples side-by-side outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, ) audio = [output["audio"] for 
output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test other generation strategy forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, "semantic_num_return_sequences": 2, } outputs = speech_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test using a speaker embedding processor = AutoProcessor.from_pretrained("suno/bark-small") temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt outputs = speech_generator( ["This is a test", "This is a second test"], forward_params=forward_params, batch_size=2, ) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) @slow @require_torch_accelerator def test_conversion_additional_tensor(self): speech_generator = pipeline(task="text-to-audio", model="suno/bark-small", framework="pt", device=torch_device) processor = AutoProcessor.from_pretrained("suno/bark-small") forward_params = { "do_sample": True, "semantic_max_new_tokens": 100, } # atm, must do to stay coherent with BarkProcessor preprocess_params = { "max_length": 256, "add_special_tokens": False, "return_attention_mask": True, "return_token_type_ids": False, "padding": "max_length", } outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params, ) temp_inp = processor("hey, how are you?", voice_preset="v2/en_speaker_5") history_prompt = temp_inp["history_prompt"] forward_params["history_prompt"] = history_prompt # history_prompt is a torch.Tensor passed as a forward_param # if generation is successful, it means that it was passed to the right device outputs = speech_generator( "This is a test", forward_params=forward_params, preprocess_params=preprocess_params ) self.assertEqual( {"audio": ANY(np.ndarray), "sampling_rate": 24000}, outputs, ) @slow @require_torch def test_vits_model_pt(self): speech_generator = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt") outputs = speech_generator("This is a test") self.assertEqual(outputs["sampling_rate"], 16000) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # test two examples side-by-side outputs = speech_generator(["This is a test", "This is a second test"]) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio) # test batching outputs = speech_generator(["This is a test", "This is a second test"], batch_size=2) self.assertEqual(ANY(np.ndarray), outputs[0]["audio"]) @slow @require_torch def test_forward_model_kwargs(self): # use vits - a forward model speech_generator = pipeline(task="text-to-audio", model="kakao-enterprise/vits-vctk", framework="pt") # for reproducibility set_seed(555) outputs = speech_generator("This is a test", forward_params={"speaker_id": 5}) audio = outputs["audio"] with self.assertRaises(TypeError): # assert error if generate parameter outputs = speech_generator("This is a test", forward_params={"speaker_id": 5, "do_sample": True}) forward_params = {"speaker_id": 5} generate_kwargs = {"do_sample": True} with self.assertRaises(ValueError): # assert error if generate_kwargs with forward-only models outputs = speech_generator( "This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs ) self.assertTrue(np.abs(outputs["audio"] - audio).max() < 1e-5) @slow @require_torch def 
test_generative_model_kwargs(self): # use musicgen - a generative model music_generator = pipeline(task="text-to-audio", model="facebook/musicgen-small", framework="pt") forward_params = { "do_sample": True, "max_new_tokens": 250, } # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params) audio = outputs["audio"] self.assertEqual(ANY(np.ndarray), audio) # make sure generate kwargs get priority over forward params forward_params = { "do_sample": False, "max_new_tokens": 250, } generate_kwargs = {"do_sample": True} # for reproducibility set_seed(555) outputs = music_generator("This is a test", forward_params=forward_params, generate_kwargs=generate_kwargs) self.assertListEqual(outputs["audio"].tolist(), audio.tolist()) def get_test_pipeline(self, model, tokenizer, processor): speech_generator = TextToAudioPipeline(model=model, tokenizer=tokenizer) return speech_generator, ["This is a test", "Another test"] def run_pipeline_test(self, speech_generator, _): outputs = speech_generator("This is a test") self.assertEqual(ANY(np.ndarray), outputs["audio"]) forward_params = ( {"num_return_sequences": 2, "do_sample": True} if speech_generator.model.can_generate() else {} ) outputs = speech_generator(["This is great !", "Something else"], forward_params=forward_params) audio = [output["audio"] for output in outputs] self.assertEqual([ANY(np.ndarray), ANY(np.ndarray)], audio)
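Distilled from the tests above, a small sketch of the forward-only (non-generative) text-to-speech path using the VITS checkpoint; a text-to-audio pipeline returns a dict holding a NumPy waveform and its sampling rate.

from transformers import pipeline

tts = pipeline(task="text-to-audio", model="facebook/mms-tts-eng", framework="pt")

out = tts("This is a test")
print(out["sampling_rate"])  # 16000 for this checkpoint
print(out["audio"].shape)    # NumPy waveform; save it with any audio library you prefer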
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_zero_shot_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ZeroShotImageClassificationPipelineTests(unittest.TestCase): # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping, # and only CLIP would be there for now. # model_mapping = {CLIPConfig: CLIPModel} # def get_test_pipeline(self, model, tokenizer, processor): # if tokenizer is None: # # Side effect of no Fast Tokenizer class for these model, so skipping # # But the slow tokenizer test should still run as they're quite small # self.skipTest("No tokenizer available") # return # # return None, None # image_classifier = ZeroShotImageClassificationPipeline( # model=model, tokenizer=tokenizer, feature_extractor=processor # ) # # test with a raw waveform # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # image2 = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # return image_classifier, [image, image2] # def run_pipeline_test(self, pipe, examples): # image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") # outputs = pipe(image, candidate_labels=["A", "B"]) # self.assertEqual(outputs, {"text": ANY(str)}) # # Batching # outputs = pipe([image] * 3, batch_size=2, candidate_labels=["A", "B"]) @require_torch def test_small_model_pt(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) # The floating scores are so close, we enter floating error approximation and the order is not guaranteed across # python and torch versions. self.assertIn( nested_simplify(output), [ [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}, {"score": 0.333, "label": "b"}], [{"score": 0.333, "label": "b"}, {"score": 0.333, "label": "a"}, {"score": 0.333, "label": "c"}], ], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. 
[ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @require_tf def test_small_model_tf(self): image_classifier = pipeline( model="hf-internal-testing/tiny-random-clip-zero-shot-image-classification", framework="tf" ) image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["a", "b", "c"]) self.assertEqual( nested_simplify(output), [{"score": 0.333, "label": "a"}, {"score": 0.333, "label": "b"}, {"score": 0.333, "label": "c"}], ) output = image_classifier([image] * 5, candidate_labels=["A", "B", "C"], batch_size=2) self.assertEqual( nested_simplify(output), # Pipeline outputs are supposed to be deterministic and # So we could in theory have real values "A", "B", "C" instead # of ANY(str). # However it seems that in this particular case, the floating # scores are so close, we enter floating error approximation # and the order is not guaranteed anymore with batching. [ [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], [ {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, {"score": 0.333, "label": ANY(str)}, ], ], ) @slow @require_torch def test_large_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_tf def test_large_model_tf(self): image_classifier = pipeline( task="zero-shot-image-classification", model="openai/clip-vit-base-patch32", framework="tf" ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["cat", "plane", "remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["cat", "plane", "remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 
0.511, "label": "remote"}, {"score": 0.485, "label": "cat"}, {"score": 0.004, "label": "plane"}, ], ] * 5, ) @slow @require_torch def test_siglip_model_pt(self): image_classifier = pipeline( task="zero-shot-image-classification", model="google/siglip-base-patch16-224", ) # This is an image of 2 cats with remotes and no planes image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") output = image_classifier(image, candidate_labels=["2 cats", "a plane", "a remote"]) self.assertEqual( nested_simplify(output), [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ], ) output = image_classifier([image] * 5, candidate_labels=["2 cats", "a plane", "a remote"], batch_size=2) self.assertEqual( nested_simplify(output), [ [ {"score": 0.198, "label": "2 cats"}, {"score": 0.0, "label": "a remote"}, {"score": 0.0, "label": "a plane"}, ] ] * 5, )
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_video_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from huggingface_hub import hf_hub_download

from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_av,
    require_tf,
    require_torch,
    require_torch_or_tf,
    require_vision,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_av
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
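# Editorial usage sketch, not part of the original test module: how the
# video-classification pipeline covered above is typically called. The tiny test
# checkpoint and the demo video are the ones downloaded in test_small_model_pt;
# decoding the video requires the `av` package (the tests are gated on @require_av).
if __name__ == "__main__":
    from huggingface_hub import hf_hub_download
    from transformers import pipeline

    video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
    video_classifier = pipeline(
        "video-classification", model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
    )
    # top_k limits how many labels are returned per video, as in the test above.
    print(video_classifier(video_path, top_k=2))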
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_image_feature_extraction.py
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from transformers import ( MODEL_MAPPING, TF_MODEL_MAPPING, TOKENIZER_MAPPING, ImageFeatureExtractionPipeline, is_tf_available, is_torch_available, is_vision_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf if is_vision_available(): from PIL import Image # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @is_pipeline_test class ImageFeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) img = prepare_img() outputs = feature_extractor(img) self.assertEqual( nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip @require_torch def test_small_model_w_pooler_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="pt" ) img = prepare_img() outputs = feature_extractor(img, pool=True) self.assertEqual( nested_simplify(outputs[0]), [-0.056, 0.083, 0.021, 0.038, 0.242, -0.279, -0.033, -0.003, 0.200, -0.192, 0.045, -0.095, -0.077, 0.017, -0.058, -0.063, -0.029, -0.204, 0.014, 0.042, 0.305, -0.205, -0.099, 0.146, -0.287, 0.020, 0.168, -0.052, 0.046, 0.048, -0.156, 0.093]) # fmt: skip @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="tf" ) img = prepare_img() outputs = feature_extractor(img) self.assertEqual( nested_simplify(outputs[0][0]), [-1.417, -0.392, -1.264, -1.196, 1.648, 0.885, 0.56, -0.606, -1.175, 0.823, 1.912, 0.081, -0.053, 1.119, -0.062, -1.757, -0.571, 0.075, 0.959, 0.118, 1.201, -0.672, -0.498, 0.364, 0.937, -1.623, 0.228, 0.19, 1.697, -1.115, 0.583, -0.981]) # fmt: skip @require_tf def test_small_model_w_pooler_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit-w-pooler", framework="tf" ) img = prepare_img() outputs = feature_extractor(img, pool=True) self.assertEqual( nested_simplify(outputs[0]), [-0.056, 0.083, 0.021, 0.038, 0.242, -0.279, -0.033, -0.003, 0.200, -0.192, 0.045, -0.095, -0.077, 0.017, -0.058, -0.063, -0.029, -0.204, 0.014, 0.042, 0.305, -0.205, -0.099, 0.146, -0.287, 0.020, 0.168, -0.052, 0.046, 0.048, -0.156, 0.093]) # fmt: skip @require_torch def 
test_image_processing_small_model_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) # test with image processor parameters image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size feature_extractor(img, image_processor_kwargs=image_processor_kwargs) image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) # Test pooling option outputs = feature_extractor(img, pool=True) self.assertEqual(np.squeeze(outputs).shape, (32,)) @require_tf def test_image_processing_small_model_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" ) # test with image processor parameters image_processor_kwargs = {"size": {"height": 300, "width": 300}} img = prepare_img() with pytest.raises(ValueError): # Image doesn't match model input size feature_extractor(img, image_processor_kwargs=image_processor_kwargs) image_processor_kwargs = {"image_mean": [0, 0, 0], "image_std": [1, 1, 1]} img = prepare_img() outputs = feature_extractor(img, image_processor_kwargs=image_processor_kwargs) self.assertEqual(np.squeeze(outputs).shape, (226, 32)) # Test pooling option outputs = feature_extractor(img, pool=True) self.assertEqual(np.squeeze(outputs).shape, (32,)) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt" ) img = prepare_img() outputs = feature_extractor(img, return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="tf" ) img = prepare_img() outputs = feature_extractor(img, return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_test_pipeline(self, model, tokenizer, processor): if processor is None: self.skipTest("No image processor") elif type(model.config) in TOKENIZER_MAPPING: self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.") elif model.config.is_encoder_decoder: self.skipTest( """encoder_decoder models are trickier for this pipeline. Do we want encoder + decoder inputs to get some featues? Do we want encoder only features ? For now ignore those. """ ) feature_extractor = ImageFeatureExtractionPipeline(model=model, image_processor=processor) img = prepare_img() return feature_extractor, [img, img] def run_pipeline_test(self, feature_extractor, examples): imgs = examples outputs = feature_extractor(imgs[0]) self.assertEqual(len(outputs), 1) outputs = feature_extractor(imgs) self.assertEqual(len(outputs), 2)
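# Editorial usage sketch, not part of the original test module: extracting image
# embeddings with the image-feature-extraction pipeline tested above. The tiny ViT
# checkpoint, the COCO fixture image, and the pool=True behaviour (squeezing to a
# single 32-dimensional vector) all mirror the assertions in the tests above.
if __name__ == "__main__":
    import numpy as np
    from PIL import Image
    from transformers import pipeline

    extractor = pipeline(task="image-feature-extraction", model="hf-internal-testing/tiny-random-vit", framework="pt")
    img = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    # pool=True returns the pooled embedding instead of per-patch hidden states.
    features = extractor(img, pool=True)
    print(np.squeeze(features).shape)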
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_mask_generation.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from typing import Dict import numpy as np from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"hash": hashimage(mask), "shape": shape} @is_pipeline_test @require_vision @require_torch class MaskGenerationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) tf_model_mapping = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] # TODO: Implement me @Arthur def run_pipeline_test(self, mask_generator, examples): pass @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_small_model_pt(self): image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge") outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {'mask': {'hash': '115ad19f5f', 'shape': (480, 640)}, 'scores': 1.0444}, {'mask': {'hash': '6affa964c6', 'shape': (480, 640)}, 'scores': 1.021}, {'mask': {'hash': 'dfe28a0388', 'shape': (480, 640)}, 'scores': 1.0167}, {'mask': {'hash': 'c0a5f4a318', 'shape': (480, 640)}, 'scores': 1.0132}, {'mask': {'hash': 'fe8065c197', 'shape': (480, 640)}, 'scores': 1.0053}, {'mask': {'hash': 'e2d0b7a0b7', 'shape': (480, 640)}, 'scores': 0.9967}, {'mask': {'hash': '453c7844bd', 'shape': (480, 640)}, 'scores': 0.993}, {'mask': {'hash': '3d44f2926d', 'shape': (480, 640)}, 'scores': 0.9909}, {'mask': {'hash': '64033ddc3f', 'shape': (480, 640)}, 'scores': 0.9879}, {'mask': {'hash': '801064ff79', 'shape': (480, 640)}, 'scores': 0.9834}, {'mask': {'hash': '6172f276ef', 'shape': (480, 640)}, 'scores': 0.9716}, {'mask': {'hash': 'b49e60e084', 'shape': (480, 640)}, 'scores': 
0.9612}, {'mask': {'hash': 'a811e775fd', 'shape': (480, 640)}, 'scores': 0.9599}, {'mask': {'hash': 'a6a8ebcf4b', 'shape': (480, 640)}, 'scores': 0.9552}, {'mask': {'hash': '9d8257e080', 'shape': (480, 640)}, 'scores': 0.9532}, {'mask': {'hash': '32de6454a8', 'shape': (480, 640)}, 'scores': 0.9516}, {'mask': {'hash': 'af3d4af2c8', 'shape': (480, 640)}, 'scores': 0.9499}, {'mask': {'hash': '3c6db475fb', 'shape': (480, 640)}, 'scores': 0.9483}, {'mask': {'hash': 'c290813fb9', 'shape': (480, 640)}, 'scores': 0.9464}, {'mask': {'hash': 'b6f0b8f606', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': '92ce16bfdf', 'shape': (480, 640)}, 'scores': 0.943}, {'mask': {'hash': 'c749b25868', 'shape': (480, 640)}, 'scores': 0.9408}, {'mask': {'hash': 'efb6cab859', 'shape': (480, 640)}, 'scores': 0.9335}, {'mask': {'hash': '1ff2eafb30', 'shape': (480, 640)}, 'scores': 0.9326}, {'mask': {'hash': '788b798e24', 'shape': (480, 640)}, 'scores': 0.9262}, {'mask': {'hash': 'abea804f0e', 'shape': (480, 640)}, 'scores': 0.8999}, {'mask': {'hash': '7b9e8ddb73', 'shape': (480, 640)}, 'scores': 0.8986}, {'mask': {'hash': 'cd24047c8a', 'shape': (480, 640)}, 'scores': 0.8984}, {'mask': {'hash': '6943e6bcbd', 'shape': (480, 640)}, 'scores': 0.8873}, {'mask': {'hash': 'b5f47c9191', 'shape': (480, 640)}, 'scores': 0.8871} ], ) # fmt: on @require_torch @slow def test_threshold(self): model_id = "facebook/sam-vit-huge" image_segmenter = pipeline("mask-generation", model=model_id) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256 ) # Shortening by hashing new_outupt = [] for i, o in enumerate(outputs["masks"]): new_outupt += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(new_outupt, decimals=4), [ {"mask": {"hash": "115ad19f5f", "shape": (480, 640)}, "scores": 1.0444}, {"mask": {"hash": "6affa964c6", "shape": (480, 640)}, "scores": 1.0210}, {"mask": {"hash": "dfe28a0388", "shape": (480, 640)}, "scores": 1.0167}, {"mask": {"hash": "c0a5f4a318", "shape": (480, 640)}, "scores": 1.0132}, {"mask": {"hash": "fe8065c197", "shape": (480, 640)}, "scores": 1.0053}, ], )
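# Editorial usage sketch, not part of the original test module: generating
# segmentation masks with the mask-generation pipeline tested above. The SAM
# checkpoint, image URL, and points_per_batch value mirror the slow test; note that
# facebook/sam-vit-huge is a large download and the call is GPU-hungry.
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
    outputs = generator("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
    # `outputs` is a dict with parallel "masks" and "scores" lists, one entry per detected mask.
    print(len(outputs["masks"]), outputs["scores"][:3])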
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_zero_shot_audio_classification.py
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    # Deactivating auto tests since we don't have a good MODEL_FOR_XX mapping,
    # and only CLAP would be there for now.
    # model_mapping = {CLAPConfig: CLAPModel}

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
        audio = dataset["train"]["audio"][-1]["array"]

        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 1.0, "label": "Sound of a dog"},
                {"score": 0.0, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 1.0, "label": "Sound of a dog"},
                    {"score": 0.0, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 1.0, "label": "Sound of a dog"},
                    {"score": 0.0, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
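# Editorial usage sketch, not part of the original test module: zero-shot audio
# classification with the CLAP checkpoint used in the slow test. The ESC-50 dog
# example dataset and the candidate labels (including the original "vaccum"
# spelling) are taken verbatim from the tests above.
if __name__ == "__main__":
    from datasets import load_dataset
    from transformers import pipeline

    audio_classifier = pipeline(task="zero-shot-audio-classification", model="laion/clap-htsat-unfused")
    dataset = load_dataset("hf-internal-testing/ashraq-esc50-1-dog-example")
    audio = dataset["train"]["audio"][-1]["array"]
    # Returns one dict per candidate label with a softmaxed score.
    print(audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"]))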
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_image_classification.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, PreTrainedTokenizerBase, is_vision_available, ) from transformers.pipelines import ImageClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_torch_or_tf @require_vision class ImageClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): image_classifier = ImageClassificationPipeline(model=model, image_processor=processor, top_k=2) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", ] return image_classifier, examples def run_pipeline_test(self, image_classifier, examples): outputs = image_classifier("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual( outputs, [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # Accepts URL + PIL.Image + lists outputs = image_classifier( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( outputs, [ [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], [ {"score": ANY(float), "label": ANY(str)}, {"score": ANY(float), "label": ANY(str)}, ], ], ) @require_torch def test_small_model_pt(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( 
nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) @require_tf def test_small_model_tf(self): small_model = "hf-internal-testing/tiny-random-vit" image_classifier = pipeline("image-classification", model=small_model, framework="tf") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ) outputs = image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], top_k=2, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], [{"label": "LABEL_1", "score": 0.574}, {"label": "LABEL_0", "score": 0.426}], ], ) def test_custom_tokenizer(self): tokenizer = PreTrainedTokenizerBase() # Assert that the pipeline can be initialized with a feature extractor that is not in any mapping image_classifier = pipeline( "image-classification", model="hf-internal-testing/tiny-random-vit", tokenizer=tokenizer ) self.assertIs(image_classifier.tokenizer, tokenizer) @slow @require_torch def test_perceiver(self): # Perceiver is not tested by `run_pipeline_test` properly. # That is because the type of feature_extractor and model preprocessor need to be kept # in sync, which is not the case in the current design image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-conv") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.4385, "label": "tabby, tabby cat"}, {"score": 0.321, "label": "tiger cat"}, {"score": 0.0502, "label": "Egyptian cat"}, {"score": 0.0137, "label": "crib, cot"}, {"score": 0.007, "label": "radiator"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-fourier") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.5658, "label": "tabby, tabby cat"}, {"score": 0.1309, "label": "tiger cat"}, {"score": 0.0722, "label": "Egyptian cat"}, {"score": 0.0707, "label": "remote control, remote"}, {"score": 0.0082, "label": "computer keyboard, keypad"}, ], ) image_classifier = pipeline("image-classification", model="deepmind/vision-perceiver-learned") outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.3022, "label": "tabby, tabby cat"}, {"score": 0.2362, "label": "Egyptian cat"}, {"score": 0.1856, "label": "tiger cat"}, {"score": 0.0324, "label": "remote control, remote"}, {"score": 0.0096, "label": "quilt, comforter, comfort, puff"}, ], ) @slow @require_torch def test_multilabel_classification(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) image_classifier.model.config.problem_type = "multi_label_classification" outputs = image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg") self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ) outputs = 
image_classifier( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], ], ) @slow @require_torch def test_function_to_apply(self): small_model = "hf-internal-testing/tiny-random-vit" # Sigmoid is applied for multi-label classification image_classifier = pipeline("image-classification", model=small_model) outputs = image_classifier( "http://images.cocodataset.org/val2017/000000039769.jpg", function_to_apply="sigmoid", ) self.assertEqual( nested_simplify(outputs, decimals=4), [{"label": "LABEL_1", "score": 0.5356}, {"label": "LABEL_0", "score": 0.4612}], )
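# Editorial usage sketch, not part of the original test module: the basic
# image-classification pipeline call that the tests above assert against, using the
# tiny random ViT checkpoint and the COCO image URL from the tests. Scores for this
# random model carry no meaning beyond the test fixtures.
if __name__ == "__main__":
    from transformers import pipeline

    image_classifier = pipeline("image-classification", model="hf-internal-testing/tiny-random-vit")
    # top_k=2 returns the two highest-scoring labels, matching the batched test above.
    print(image_classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=2))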
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_token_classification.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, AutoModelForTokenClassification, AutoTokenizer, TokenClassificationPipeline, pipeline, ) from transformers.pipelines import AggregationStrategy, TokenClassificationArgumentHandler from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_accelerator, slow, torch_device, ) from .test_pipelines_common import ANY VALID_INPUTS = ["A simple string", ["list of strings", "A simple string that is quite a bit longer"]] # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class TokenClassificationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING tf_model_mapping = TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer) return token_classifier, ["A simple string", "A simple string that is quite a bit longer"] def run_pipeline_test(self, token_classifier, _): model = token_classifier.model tokenizer = token_classifier.tokenizer if not tokenizer.is_fast: return # Slow tokenizers do not return offsets mappings, so this test will fail outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], ) outputs = token_classifier(["list of strings", "A simple string that is quite a bit longer"]) self.assertIsInstance(outputs, list) self.assertEqual(len(outputs), 2) n = len(outputs[0]) m = len(outputs[1]) self.assertEqual( nested_simplify(outputs), [ [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(n) ], [ { "entity": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "index": ANY(int), "word": ANY(str), } for i in range(m) ], ], ) self.run_aggregation_strategy(model, tokenizer) def run_aggregation_strategy(self, model, tokenizer): token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="simple") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) 
self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="first") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline(model=model, tokenizer=tokenizer, aggregation_strategy="max") self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.MAX) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average" ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.AVERAGE) outputs = token_classifier("A simple string") self.assertIsInstance(outputs, list) n = len(outputs) self.assertEqual( nested_simplify(outputs), [ { "entity_group": ANY(str), "score": ANY(float), "start": ANY(int), "end": ANY(int), "word": ANY(str), } for i in range(n) ], ) with self.assertWarns(UserWarning): token_classifier = pipeline(task="ner", model=model, tokenizer=tokenizer, grouped_entities=True) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.SIMPLE) with self.assertWarns(UserWarning): token_classifier = pipeline( task="ner", model=model, tokenizer=tokenizer, grouped_entities=True, ignore_subwords=True ) self.assertEqual(token_classifier._postprocess_params["aggregation_strategy"], AggregationStrategy.FIRST) @slow @require_torch def test_chunking(self): NER_MODEL = "elastic/distilbert-base-uncased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) tokenizer.model_max_length = 10 stride = 5 sentence = ( "Hugging Face, Inc. is a French company that develops tools for building applications using machine learning. " "The company, based in New York City was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf." 
) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="simple", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="first", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="max", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) token_classifier = TokenClassificationPipeline( model=model, tokenizer=tokenizer, aggregation_strategy="average", stride=stride ) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "ORG", "score": 0.978, "word": "hugging face, inc.", "start": 0, "end": 18}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 24, "end": 30}, {"entity_group": "LOC", "score": 0.997, "word": "new york city", "start": 131, "end": 144}, {"entity_group": "MISC", "score": 0.999, "word": "french", "start": 168, "end": 174}, {"entity_group": "PER", "score": 0.999, "word": "clement delangue", "start": 189, "end": 205}, {"entity_group": "PER", "score": 0.999, "word": "julien chaumond", "start": 207, "end": 222}, {"entity_group": "PER", "score": 0.999, "word": "thomas wolf", "start": 228, "end": 239}, ], ) @require_torch def test_chunking_fast(self): # Note: We cannot run the test on "conflicts" on the chunking. 
# The problem is that the model is random, and thus the results do heavily # depend on the chunking, so we cannot expect "abcd" and "bcd" to find # the same entities. We defer to slow tests for this. pipe = pipeline(model="hf-internal-testing/tiny-bert-for-token-classification") sentence = "The company, based in New York City was founded in 2016 by French entrepreneurs" results = pipe(sentence, aggregation_strategy="first") # This is what this random model gives on the full sentence self.assertEqual( nested_simplify(results), [ # This is 2 actual tokens {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) # This will force the tokenizer to split after "city was". pipe.tokenizer.model_max_length = 12 self.assertEqual( pipe.tokenizer.decode(pipe.tokenizer.encode(sentence, truncation=True)), "[CLS] the company, based in new york city was [SEP]", ) stride = 4 results = pipe(sentence, aggregation_strategy="first", stride=stride) self.assertEqual( nested_simplify(results), [ {"end": 39, "entity_group": "MISC", "score": 0.115, "start": 31, "word": "city was"}, # This is an extra entity found by this random model, but at least both original # entities are there {"end": 58, "entity_group": "MISC", "score": 0.115, "start": 56, "word": "by"}, {"end": 79, "entity_group": "MISC", "score": 0.115, "start": 66, "word": "entrepreneurs"}, ], ) @require_torch @slow def test_spanish_bert(self): # https://github.com/huggingface/transformers/pull/4987 NER_MODEL = "mrm8488/bert-spanish-cased-finetuned-ner" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Consuelo Araújo Noguera, ministra de cultura del presidente Andrés Pastrana (1998.2002) fue asesinada por las Farc luego de haber permanecido secuestrada por algunos meses.""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity": "B-PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4, "index": 1}, {"entity": "B-PER", "score": 0.803, "word": "##uelo", "start": 4, "end": 8, "index": 2}, {"entity": "I-PER", "score": 0.999, "word": "Ara", "start": 9, "end": 12, "index": 3}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Cons", "start": 0, "end": 4}, {"entity_group": "PER", "score": 0.966, "word": "##uelo Araújo Noguera", "start": 4, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.999, "word": "Consuelo Araújo 
Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.999, "word": "Farc", "start": 110, "end": 114}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.966, "word": "Consuelo Araújo Noguera", "start": 0, "end": 23}, {"entity_group": "PER", "score": 1.0, "word": "Andrés Pastrana", "start": 60, "end": 75}, {"entity_group": "ORG", "score": 0.542, "word": "Farc", "start": 110, "end": 114}, ], ) @require_torch_accelerator @slow def test_accelerator(self): sentence = "This is dummy sentence" ner = pipeline( "token-classification", device=torch_device, aggregation_strategy=AggregationStrategy.SIMPLE, ) output = ner(sentence) self.assertEqual(nested_simplify(output), []) @require_torch @slow def test_dbmdz_english(self): # Other sentence NER_MODEL = "dbmdz/bert-large-cased-finetuned-conll03-english" model = AutoModelForTokenClassification.from_pretrained(NER_MODEL) tokenizer = AutoTokenizer.from_pretrained(NER_MODEL, use_fast=True) sentence = """Enzo works at the UN""" token_classifier = pipeline("ner", model=model, tokenizer=tokenizer) output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity": "I-PER", "score": 0.998, "word": "En", "start": 0, "end": 2, "index": 1}, {"entity": "I-PER", "score": 0.997, "word": "##zo", "start": 2, "end": 4, "index": 2}, {"entity": "I-ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20, "index": 6}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="simple") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="first") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="max") output = token_classifier(sentence) self.assertEqual( nested_simplify(output[:3]), [ {"entity_group": "PER", "score": 0.998, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) token_classifier = pipeline("ner", model=model, tokenizer=tokenizer, aggregation_strategy="average") output = token_classifier(sentence) self.assertEqual( nested_simplify(output), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 18, "end": 20}, ], ) @require_torch @slow def test_aggregation_strategy_byte_level_tokenizer(self): sentence = "Groenlinks praat over Schiphol." 
ner = pipeline("ner", model="FacebookAI/xlm-roberta-large-finetuned-conll02-dutch", aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"end": 10, "entity_group": "ORG", "score": 0.994, "start": 0, "word": "Groenlinks"}, {"entity_group": "LOC", "score": 1.0, "word": "Schiphol.", "start": 22, "end": 31}, ], ) @require_torch def test_aggregation_strategy_no_b_i_prefix(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test token_classifier.model.config.id2label = {0: "O", 1: "MISC", 2: "PER", 3: "ORG", 4: "LOC"} example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0.9986497163772583, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "LOC", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "LOC", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "LOC", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { "scores": np.array([0, 0, 0, 0, 0.9968166351318359, 0, 0, 0]), # fmt : skip "index": 1, "is_subword": False, "word": "En", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0, 0.9957635998725891, 0, 0, 0]), # fmt : skip "index": 2, "is_subword": True, "word": "##zo", "start": 2, "end": 4, }, { "scores": np.array([0, 0, 0, 0, 0, 0.9986497163772583, 0, 0]), # fmt : skip "index": 7, "word": "UN", "is_subword": False, "start": 11, "end": 13, }, ] self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.NONE)), [ {"end": 2, "entity": "I-PER", "score": 0.997, "start": 0, "word": "En", "index": 1}, {"end": 4, "entity": "I-PER", "score": 0.996, "start": 2, "word": "##zo", "index": 2}, {"end": 13, "entity": "B-ORG", "score": 0.999, "start": 11, "word": "UN", "index": 7}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.SIMPLE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, 
AggregationStrategy.FIRST)), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.MAX)), [ {"entity_group": "PER", "score": 0.997, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [ {"entity_group": "PER", "score": 0.996, "word": "Enzo", "start": 0, "end": 4}, {"entity_group": "ORG", "score": 0.999, "word": "UN", "start": 11, "end": 13}, ], ) @require_torch def test_aggregation_strategy_example2(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") # Just to understand scores indexes in this test self.assertEqual( token_classifier.model.config.id2label, {0: "O", 1: "B-MISC", 2: "I-MISC", 3: "B-PER", 4: "I-PER", 5: "B-ORG", 6: "I-ORG", 7: "B-LOC", 8: "I-LOC"}, ) example = [ { # Necessary for AVERAGE "scores": np.array([0, 0.55, 0, 0.45, 0, 0, 0, 0, 0, 0]), "is_subword": False, "index": 1, "word": "Ra", "start": 0, "end": 2, }, { "scores": np.array([0, 0, 0, 0.2, 0, 0, 0, 0.8, 0, 0]), "is_subword": True, "word": "##ma", "start": 2, "end": 4, "index": 2, }, { # 4th score will have the higher average # 4th score is B-PER for this model # It's does not correspond to any of the subtokens. "scores": np.array([0, 0, 0, 0.4, 0, 0, 0.6, 0, 0, 0]), "is_subword": True, "word": "##zotti", "start": 11, "end": 13, "index": 3, }, ] self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.NONE), [ {"end": 2, "entity": "B-MISC", "score": 0.55, "start": 0, "word": "Ra", "index": 1}, {"end": 4, "entity": "B-LOC", "score": 0.8, "start": 2, "word": "##ma", "index": 2}, {"end": 13, "entity": "I-ORG", "score": 0.6, "start": 11, "word": "##zotti", "index": 3}, ], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.FIRST), [{"entity_group": "MISC", "score": 0.55, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( token_classifier.aggregate(example, AggregationStrategy.MAX), [{"entity_group": "LOC", "score": 0.8, "word": "Ramazotti", "start": 0, "end": 13}], ) self.assertEqual( nested_simplify(token_classifier.aggregate(example, AggregationStrategy.AVERAGE)), [{"entity_group": "PER", "score": 0.35, "word": "Ramazotti", "start": 0, "end": 13}], ) @require_torch @slow def test_aggregation_strategy_offsets_with_leading_space(self): sentence = "We're from New York" model_name = "brandon25/deberta-base-finetuned-ner" ner = pipeline("ner", model=model_name, ignore_labels=[], aggregation_strategy="max") self.assertEqual( nested_simplify(ner(sentence)), [ {"entity_group": "O", "score": 1.0, "word": " We're from", "start": 0, "end": 10}, {"entity_group": "LOC", "score": 1.0, "word": " New York", "start": 10, "end": 19}, ], ) @require_torch def test_gather_pre_entities(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") sentence = "Hello there" tokens = tokenizer( sentence, return_attention_mask=False, 
return_tensors="pt", truncation=True, return_special_tokens_mask=True, return_offsets_mapping=True, ) offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0] special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0] input_ids = tokens["input_ids"].numpy()[0] # First element in [CLS] scores = np.array([[1, 0, 0], [0.1, 0.3, 0.6], [0.8, 0.1, 0.1]]) pre_entities = token_classifier.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy=AggregationStrategy.NONE, ) self.assertEqual( nested_simplify(pre_entities), [ {"word": "Hello", "scores": [0.1, 0.3, 0.6], "start": 0, "end": 5, "is_subword": False, "index": 1}, { "word": "there", "scores": [0.8, 0.1, 0.1], "index": 2, "start": 6, "end": 11, "is_subword": False, }, ], ) @require_torch def test_word_heuristic_leading_space(self): model_name = "hf-internal-testing/tiny-random-deberta-v2" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True) token_classifier = pipeline(task="ner", model=model_name, tokenizer=tokenizer, framework="pt") sentence = "I play the theremin" tokens = tokenizer( sentence, return_attention_mask=False, return_tensors="pt", return_special_tokens_mask=True, return_offsets_mapping=True, ) offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0] special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0] input_ids = tokens["input_ids"].numpy()[0] scores = np.array([[1, 0] for _ in input_ids]) # values irrelevant for heuristic pre_entities = token_classifier.gather_pre_entities( sentence, input_ids, scores, offset_mapping, special_tokens_mask, aggregation_strategy=AggregationStrategy.FIRST, ) # ensure expected tokenization and correct is_subword values self.assertEqual( [(entity["word"], entity["is_subword"]) for entity in pre_entities], [("▁I", False), ("▁play", False), ("▁the", False), ("▁there", False), ("min", True)], ) @require_tf def test_tf_only(self): model_name = "hf-internal-testing/tiny-random-bert-tf-only" # This model only has a TensorFlow version # We test that if we don't specificy framework='tf', it gets detected automatically token_classifier = pipeline(task="ner", model=model_name) self.assertEqual(token_classifier.framework, "tf") @require_tf def test_small_model_tf(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" token_classifier = pipeline(task="token-classification", model=model_name, framework="tf") outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], ) @require_torch def test_no_offset_tokenizer(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) token_classifier = pipeline(task="token-classification", model=model_name, tokenizer=tokenizer, framework="pt") outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": None, "end": None}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": None, "end": None}, ], ) @require_torch def test_small_model_pt(self): model_name = "hf-internal-testing/tiny-bert-for-token-classification" token_classifier = pipeline(task="token-classification", model=model_name, framework="pt") outputs = 
token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], ) token_classifier = pipeline( task="token-classification", model=model_name, framework="pt", ignore_labels=["O", "I-MISC"] ) outputs = token_classifier("This is a test !") self.assertEqual( nested_simplify(outputs), [], ) token_classifier = pipeline(task="token-classification", model=model_name, framework="pt") # Overload offset_mapping outputs = token_classifier( "This is a test !", offset_mapping=[(0, 0), (0, 1), (0, 2), (0, 0), (0, 0), (0, 0), (0, 0)] ) self.assertEqual( nested_simplify(outputs), [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 1}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 0, "end": 2}, ], ) # Batch size does not affect outputs (attention_mask are required) sentences = ["This is a test !", "Another test this is with longer sentence"] outputs = token_classifier(sentences) outputs_batched = token_classifier(sentences, batch_size=2) # Batching does not make a difference in predictions self.assertEqual(nested_simplify(outputs_batched), nested_simplify(outputs)) self.assertEqual( nested_simplify(outputs_batched), [ [ {"entity": "I-MISC", "score": 0.115, "index": 1, "word": "this", "start": 0, "end": 4}, {"entity": "I-MISC", "score": 0.115, "index": 2, "word": "is", "start": 5, "end": 7}, ], [], ], ) @require_torch def test_pt_ignore_subwords_slow_tokenizer_raises(self): model_name = "sshleifer/tiny-dbmdz-bert-large-cased-finetuned-conll03-english" tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.FIRST) with self.assertRaises(ValueError): pipeline( task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.AVERAGE ) with self.assertRaises(ValueError): pipeline(task="ner", model=model_name, tokenizer=tokenizer, aggregation_strategy=AggregationStrategy.MAX) @slow @require_torch def test_simple(self): token_classifier = pipeline(task="ner", model="dslim/bert-base-NER", grouped_entities=True) sentence = "Hello Sarah Jessica Parker who Jessica lives in New York" sentence2 = "This is a simple test" output = token_classifier(sentence) output_ = nested_simplify(output) self.assertEqual( output_, [ { "entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26, }, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], ) output = token_classifier([sentence, sentence2]) output_ = nested_simplify(output) self.assertEqual( output_, [ [ {"entity_group": "PER", "score": 0.996, "word": "Sarah Jessica Parker", "start": 6, "end": 26}, {"entity_group": "PER", "score": 0.977, "word": "Jessica", "start": 31, "end": 38}, {"entity_group": "LOC", "score": 0.999, "word": "New York", "start": 48, "end": 56}, ], [], ], ) class TokenClassificationArgumentHandlerTestCase(unittest.TestCase): def setUp(self): self.args_parser = TokenClassificationArgumentHandler() def test_simple(self): string = "This is a simple input" inputs, offset_mapping = self.args_parser(string) self.assertEqual(inputs, [string]) self.assertEqual(offset_mapping, None) inputs, 
offset_mapping = self.args_parser([string, string]) self.assertEqual(inputs, [string, string]) self.assertEqual(offset_mapping, None) inputs, offset_mapping = self.args_parser(string, offset_mapping=[(0, 1), (1, 2)]) self.assertEqual(inputs, [string]) self.assertEqual(offset_mapping, [[(0, 1), (1, 2)]]) inputs, offset_mapping = self.args_parser( [string, string], offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]] ) self.assertEqual(inputs, [string, string]) self.assertEqual(offset_mapping, [[(0, 1), (1, 2)], [(0, 2), (2, 3)]]) def test_errors(self): string = "This is a simple input" # 2 sentences, 1 offset_mapping, args with self.assertRaises(TypeError): self.args_parser(string, string, offset_mapping=[[(0, 1), (1, 2)]]) # 2 sentences, 1 offset_mapping, args with self.assertRaises(TypeError): self.args_parser(string, string, offset_mapping=[(0, 1), (1, 2)]) # 2 sentences, 1 offset_mapping, input_list with self.assertRaises(ValueError): self.args_parser([string, string], offset_mapping=[[(0, 1), (1, 2)]]) # 2 sentences, 1 offset_mapping, input_list with self.assertRaises(ValueError): self.args_parser([string, string], offset_mapping=[(0, 1), (1, 2)]) # 1 sentences, 2 offset_mapping with self.assertRaises(ValueError): self.args_parser(string, offset_mapping=[[(0, 1), (1, 2)], [(0, 2), (2, 3)]]) # 0 sentences, 1 offset_mapping with self.assertRaises(TypeError): self.args_parser(offset_mapping=[[(0, 1), (1, 2)]])
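The argument-handler and aggregation tests above define how the token-classification pipeline merges subword predictions and validates user-supplied offset mappings. As a minimal usage sketch of the grouped path (the input sentence and the choice of aggregation strategy are illustrative; the checkpoint is the one used in the slow test above):

# Minimal sketch: grouped NER with the token-classification pipeline.
# "dslim/bert-base-NER" is the checkpoint used in test_simple above;
# the sentence and aggregation strategy are illustrative choices.
from transformers import pipeline

token_classifier = pipeline(
    task="ner",
    model="dslim/bert-base-NER",
    aggregation_strategy="first",  # merge subwords, keeping the first subword's score
)

for entity in token_classifier("Sarah Jessica Parker lives in New York"):
    # Grouped results expose entity_group, score, word and character offsets.
    print(entity["entity_group"], entity["word"], entity["start"], entity["end"])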
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_image_to_text.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import requests from transformers import MODEL_FOR_VISION_2_SEQ_MAPPING, TF_MODEL_FOR_VISION_2_SEQ_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision class ImageToTextPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_VISION_2_SEQ_MAPPING tf_model_mapping = TF_MODEL_FOR_VISION_2_SEQ_MAPPING def get_test_pipeline(self, model, tokenizer, processor): pipe = pipeline("image-to-text", model=model, tokenizer=tokenizer, image_processor=processor) examples = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "./tests/fixtures/tests_samples/COCO/000000039769.png", ] return pipe, examples def run_pipeline_test(self, pipe, examples): outputs = pipe(examples) self.assertEqual( outputs, [ [{"generated_text": ANY(str)}], [{"generated_text": ANY(str)}], ], ) @require_tf def test_small_model_tf(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) outputs = pipe(image, max_new_tokens=1) self.assertEqual( outputs, [{"generated_text": "growth"}], ) @require_torch def test_small_model_pt(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-vit-gpt2") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual( outputs, [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" }, ], ) outputs = pipe([image, image]) self.assertEqual( outputs, [ [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], [ { "generated_text": "growthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthgrowthGOGO" } ], ], ) @require_torch def test_small_model_pt_conditional(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe(image, prompt=prompt) 
self.assertTrue(outputs[0]["generated_text"].startswith(prompt)) @require_torch def test_consistent_batching_behaviour(self): pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" prompt = "a photo of" outputs = pipe([image, image], prompt=prompt) self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) outputs = pipe([image, image], prompt=prompt, batch_size=2) self.assertTrue(outputs[0][0]["generated_text"].startswith(prompt)) self.assertTrue(outputs[1][0]["generated_text"].startswith(prompt)) from torch.utils.data import Dataset class MyDataset(Dataset): def __len__(self): return 5 def __getitem__(self, i): return "./tests/fixtures/tests_samples/COCO/000000039769.png" dataset = MyDataset() for batch_size in (1, 2, 4): outputs = pipe(dataset, prompt=prompt, batch_size=batch_size if batch_size > 1 else None) self.assertTrue(list(outputs)[0][0]["generated_text"].startswith(prompt)) self.assertTrue(list(outputs)[1][0]["generated_text"].startswith(prompt)) @slow @require_torch def test_large_model_pt(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a pink pokemon pokemon with a blue shirt and a blue shirt"}]) @slow @require_torch def test_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/pokemon.png" image = Image.open(requests.get(url, stream=True).raw) outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cartoon of a purple character."}]) @slow @require_torch def test_conditional_generation_pt_blip(self): pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photography of" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photography of a volcano"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_git(self): pipe = pipeline("image-to-text", model="microsoft/git-base-coco") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "a photo of a" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "a photo of a tent with a tent and a tent in the background."}]) with 
self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_torch def test_conditional_generation_pt_pix2struct(self): pipe = pipeline("image-to-text", model="google/pix2struct-ai2d-base") url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg" image = Image.open(requests.get(url, stream=True).raw) prompt = "What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud" outputs = pipe(image, prompt=prompt) self.assertEqual(outputs, [{"generated_text": "ash cloud"}]) with self.assertRaises(ValueError): outputs = pipe([image, image], prompt=[prompt, prompt]) @slow @require_tf def test_large_model_tf(self): pipe = pipeline("image-to-text", model="ydshieh/vit-gpt2-coco-en", framework="tf") image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = pipe(image) self.assertEqual(outputs, [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}]) outputs = pipe([image, image]) self.assertEqual( outputs, [ [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], [{"generated_text": "a cat laying on a blanket next to a cat laying on a bed "}], ], ) @slow @require_torch def test_conditional_generation_llava(self): pipe = pipeline("image-to-text", model="llava-hf/bakLlava-v1-hf") prompt = ( "<image>\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT:" ) outputs = pipe( "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/ai2d-demo.jpg", prompt=prompt, generate_kwargs={"max_new_tokens": 200}, ) self.assertEqual( outputs, [ { "generated_text": "\nUSER: What does the label 15 represent? (1) lava (2) core (3) tunnel (4) ash cloud?\nASSISTANT: Lava" } ], ) @slow @require_torch def test_nougat(self): pipe = pipeline("image-to-text", "facebook/nougat-base") outputs = pipe("https://huggingface.co/datasets/Xenova/transformers.js-docs/resolve/main/nougat_paper.png") self.assertEqual( outputs, [{"generated_text": "# Nougat: Neural Optical Understanding for Academic Documents\n\n Lukas Blec"}], )
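The image-to-text tests above exercise both unconditional captioning and prompt-conditioned generation (BLIP, GIT, Pix2Struct, LLaVA, Nougat). A minimal sketch of the prompted path, reusing the tiny BLIP checkpoint and the COCO fixture from the tests (the max_new_tokens value is an arbitrary illustration):

# Minimal sketch: prompt-conditioned captioning with the image-to-text pipeline.
# The checkpoint and fixture path come from the tests above; max_new_tokens is arbitrary.
from transformers import pipeline

pipe = pipeline("image-to-text", model="hf-internal-testing/tiny-random-BlipForConditionalGeneration")
image = "./tests/fixtures/tests_samples/COCO/000000039769.png"

# Generation is conditioned on the prompt, so the returned text starts with the prompt itself.
outputs = pipe(image, prompt="a photo of", generate_kwargs={"max_new_tokens": 20})
print(outputs[0]["generated_text"])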
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_automatic_speech_recognition.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import pytest from datasets import Audio, load_dataset from huggingface_hub import hf_hub_download, snapshot_download from transformers import ( MODEL_FOR_CTC_MAPPING, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, Speech2TextForConditionalGeneration, Wav2Vec2ForCTC, WhisperForConditionalGeneration, ) from transformers.pipelines import AutomaticSpeechRecognitionPipeline, pipeline from transformers.pipelines.audio_utils import chunk_bytes_iter from transformers.pipelines.automatic_speech_recognition import _find_timestamp_sequence, chunk_iter from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_pyctcdecode, require_tf, require_torch, require_torch_accelerator, require_torchaudio, slow, torch_device, ) from .test_pipelines_common import ANY if is_torch_available(): import torch # We can't use this mixin because it assumes TF support. # from .test_pipelines_common import CustomInputPipelineCommonMixin @is_pipeline_test class AutomaticSpeechRecognitionPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING.items()) if MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING else []) + (MODEL_FOR_CTC_MAPPING.items() if MODEL_FOR_CTC_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None: # Side effect of no Fast Tokenizer class for these model, so skipping # But the slow tokenizer test should still run as they're quite small self.skipTest("No tokenizer available") return # return None, None speech_recognizer = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=processor ) # test with a raw waveform audio = np.zeros((34000,)) audio2 = np.zeros((14000,)) return speech_recognizer, [audio, audio2] def run_pipeline_test(self, speech_recognizer, examples): audio = np.zeros((34000,)) outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) # Striding audio = {"raw": audio, "stride": (0, 4000), "sampling_rate": speech_recognizer.feature_extractor.sampling_rate} if speech_recognizer.type == "ctc": outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) elif "Whisper" in speech_recognizer.model.__class__.__name__: outputs = speech_recognizer(audio) self.assertEqual(outputs, {"text": ANY(str)}) else: # Non CTC models cannot use striding. 
with self.assertRaises(ValueError): outputs = speech_recognizer(audio) # Timestamps audio = np.zeros((34000,)) if speech_recognizer.type == "ctc": outputs = speech_recognizer(audio, return_timestamps="char") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) outputs = speech_recognizer(audio, return_timestamps="word") self.assertIsInstance(outputs["chunks"], list) n = len(outputs["chunks"]) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(n)], }, ) elif "Whisper" in speech_recognizer.model.__class__.__name__: outputs = speech_recognizer(audio, return_timestamps=True) self.assertIsInstance(outputs["chunks"], list) nb_chunks = len(outputs["chunks"]) self.assertGreater(nb_chunks, 0) self.assertEqual( outputs, { "text": ANY(str), "chunks": [{"text": ANY(str), "timestamp": (ANY(float), ANY(float))} for i in range(nb_chunks)], }, ) else: # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): outputs = speech_recognizer(audio, return_timestamps="char") @require_torch @slow def test_pt_defaults(self): pipeline("automatic-speech-recognition", framework="pt") @require_torch def test_small_model_pt(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-small-mustc-en-fr-st", tokenizer="facebook/s2t-small-mustc-en-fr-st", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "(Applaudissements)"}) output = speech_recognizer(waveform, chunk_length_s=10) self.assertEqual(output, {"text": "(Applaudissements)"}) # Non CTC models cannot use return_timestamps with self.assertRaisesRegex( ValueError, "^We cannot return_timestamps yet on non-CTC models apart from Whisper!$" ): _ = speech_recognizer(waveform, return_timestamps="char") @slow @require_torch_accelerator def test_whisper_fp16(self): speech_recognizer = pipeline( model="openai/whisper-base", device=torch_device, torch_dtype=torch.float16, ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) speech_recognizer(waveform) @require_torch def test_small_model_pt_seq2seq(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": "あл ش 湯 清 ه ܬ া लᆨしث ल eか u w 全 u"}) @require_torch def test_small_model_pt_seq2seq_gen_kwargs(self): speech_recognizer = pipeline( model="hf-internal-testing/tiny-random-speech-encoder-decoder", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform, max_new_tokens=10, generate_kwargs={"num_beams": 2}) self.assertEqual(output, {"text": "あл † γ ت ב オ 束 泣 足"}) @slow @require_torch @require_pyctcdecode def test_large_model_pt_with_lm(self): dataset = load_dataset("Narsil/asr_dummy", streaming=True) third_item = next(iter(dataset["test"].skip(3))) filename = third_item["file"] speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm", framework="pt", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") output = 
speech_recognizer(filename) self.assertEqual( output, {"text": "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumaje"}, ) # Override back to pure CTC speech_recognizer.type = "ctc" output = speech_recognizer(filename) # plumajre != plumaje self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajre" ) }, ) speech_recognizer.type = "ctc_with_lm" # Simple test with CTC with LM, chunking + timestamps output = speech_recognizer(filename, chunk_length_s=2.0, return_timestamps="word") self.assertEqual( output, { "text": ( "y en las ramas medio sumergidas revoloteaban algunos pájaros de quimérico y legendario plumajcri" ), "chunks": [ {"text": "y", "timestamp": (0.52, 0.54)}, {"text": "en", "timestamp": (0.6, 0.68)}, {"text": "las", "timestamp": (0.74, 0.84)}, {"text": "ramas", "timestamp": (0.94, 1.24)}, {"text": "medio", "timestamp": (1.32, 1.52)}, {"text": "sumergidas", "timestamp": (1.56, 2.22)}, {"text": "revoloteaban", "timestamp": (2.36, 3.0)}, {"text": "algunos", "timestamp": (3.06, 3.38)}, {"text": "pájaros", "timestamp": (3.46, 3.86)}, {"text": "de", "timestamp": (3.92, 4.0)}, {"text": "quimérico", "timestamp": (4.08, 4.6)}, {"text": "y", "timestamp": (4.66, 4.68)}, {"text": "legendario", "timestamp": (4.74, 5.26)}, {"text": "plumajcri", "timestamp": (5.34, 5.74)}, ], }, ) # CTC + LM models cannot use return_timestamps="char" with self.assertRaisesRegex( ValueError, "^CTC with LM can only predict word level timestamps, set `return_timestamps='word'`$" ): _ = speech_recognizer(filename, return_timestamps="char") @require_tf def test_small_model_tf(self): self.skipTest("Tensorflow not supported yet.") @require_torch def test_torch_small_no_tokenizer_files(self): # test that model without tokenizer file cannot be loaded with pytest.raises(OSError): pipeline( task="automatic-speech-recognition", model="patrickvonplaten/tiny-wav2vec2-no-tokenizer", framework="pt", ) @require_torch @slow def test_torch_large(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-base-960h", tokenizer="facebook/wav2vec2-base-960h", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @require_torch @slow def test_torch_large_with_input_features(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-audio/wav2vec2-bert-CV16-en", framework="pt", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = speech_recognizer(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "a man said to the universe sir i exist"}) @slow @require_torch @slow def test_return_timestamps_in_preprocess(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=8, stride_length_s=1, ) data = load_dataset("librispeech_asr", "clean", split="test", streaming=True) sample = next(iter(data)) pipe.model.config.forced_decoder_ids = 
pipe.tokenizer.get_decoder_prompt_ids(language="en", task="transcribe") res = pipe(sample["audio"]["array"]) self.assertEqual(res, {"text": " Conquered returned to its place amidst the tents."}) res = pipe(sample["audio"]["array"], return_timestamps=True) self.assertEqual( res, { "text": " Conquered returned to its place amidst the tents.", "chunks": [{"timestamp": (0.0, 3.36), "text": " Conquered returned to its place amidst the tents."}], }, ) pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] res = pipe(sample["audio"]["array"], return_timestamps="word") # fmt: off self.assertEqual( res, { 'text': ' Conquered returned to its place amidst the tents.', 'chunks': [ {'text': ' Conquered', 'timestamp': (0.5, 1.2)}, {'text': ' returned', 'timestamp': (1.2, 1.64)}, {'text': ' to', 'timestamp': (1.64, 1.84)}, {'text': ' its', 'timestamp': (1.84, 2.02)}, {'text': ' place', 'timestamp': (2.02, 2.28)}, {'text': ' amidst', 'timestamp': (2.28, 2.8)}, {'text': ' the', 'timestamp': (2.8, 2.98)}, {'text': ' tents.', 'timestamp': (2.98, 3.48)}, ], }, ) # fmt: on @slow @require_torch def test_return_timestamps_in_preprocess_longform(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", ) data = load_dataset("librispeech_asr", "clean", split="test", streaming=True) samples = [next(iter(data)) for _ in range(8)] audio = np.concatenate([sample["audio"]["array"] for sample in samples]) res = pipe(audio) expected_output = { "text": " Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst " "the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst " "the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst " "the tents. Concord returned to its place amidst the tents." } self.assertEqual(res, expected_output) res = pipe(audio, return_timestamps=True) self.assertEqual( res, { "text": " Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. Concord returned to its place amidst the tents. 
Concord returned to its place amidst the tents.", "chunks": [ {"timestamp": (0.0, 3.22), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (3.22, 6.74), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (6.74, 10.26), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (10.26, 13.78), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (13.78, 17.3), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (17.3, 20.82), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (20.82, 24.34), "text": " Concord returned to its place amidst the tents."}, {"timestamp": (24.34, 27.86), "text": " Concord returned to its place amidst the tents."}, ], }, ) pipe.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] res = pipe(audio, return_timestamps="word") # fmt: off self.assertEqual( res["chunks"][:15], [ {"text": " Concord", "timestamp": (0.5, 0.94)}, {"text": " returned", "timestamp": (0.94, 1.52)}, {"text": " to", "timestamp": (1.52, 1.78)}, {"text": " its", "timestamp": (1.78, 1.98)}, {"text": " place", "timestamp": (1.98, 2.16)}, {"text": " amidst", "timestamp": (2.16, 2.5)}, {"text": " the", "timestamp": (2.5, 2.9)}, {"text": " tents.", "timestamp": (2.9, 4.2)}, {"text": " Concord", "timestamp": (4.2, 4.5)}, {"text": " returned", "timestamp": (4.5, 5.0)}, {"text": " to", "timestamp": (5.0, 5.28)}, {"text": " its", "timestamp": (5.28, 5.48)}, {"text": " place", "timestamp": (5.48, 5.7)}, {"text": " amidst", "timestamp": (5.7, 6.02)}, {"text": " the", "timestamp": (6.02, 6.4)} ], ) # fmt: on @require_torch def test_return_timestamps_in_init(self): # segment-level timestamps are accepted model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-tiny") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-tiny") dummy_speech = np.ones(100) pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps=True, ) _ = pipe(dummy_speech) # word-level timestamps are accepted pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="word", ) _ = pipe(dummy_speech) # char-level timestamps are not accepted with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. 
" "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): pipe = pipeline( task="automatic-speech-recognition", model=model, feature_extractor=feature_extractor, tokenizer=tokenizer, chunk_length_s=8, stride_length_s=1, return_timestamps="char", ) _ = pipe(dummy_speech) @require_torch @slow def test_torch_whisper(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) output = speech_recognizer([filename], chunk_length_s=5, batch_size=4) self.assertEqual(output, [{"text": " A man said to the universe, Sir, I exist."}]) @slow def test_find_longest_common_subsequence(self): max_source_positions = 1500 processor = AutoProcessor.from_pretrained("openai/whisper-tiny") previous_sequence = [[51492, 406, 3163, 1953, 466, 13, 51612, 51612]] self.assertEqual( processor.decode(previous_sequence[0], output_offsets=True), { "text": " not worth thinking about.", "offsets": [{"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}], }, ) # Merge when the previous sequence is a suffix of the next sequence # fmt: off next_sequences_1 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 50614, 50614, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on self.assertEqual( processor.decode(next_sequences_1[0], output_offsets=True), { "text": ( " of spectators, retrievality is not worth thinking about. His instant panic was followed by a" " small, sharp blow high on his chest.<|endoftext|>" ), "offsets": [ {"text": " of spectators, retrievality is not worth thinking about.", "timestamp": (0.0, 5.0)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (5.0, 9.4), }, ], }, ) merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_1, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51739, 51739, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 27.5)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (27.5, 31.900000000000002), }, ], }, ) # Merge when the sequence is in the middle of the 1st next sequence # fmt: off next_sequences_2 = [ [50364, 295, 6177, 3391, 11, 19817, 3337, 507, 307, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50834, 50257] ] # fmt: on # {'text': ' of spectators, retrievality is not worth thinking about. 
His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_2, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) # fmt: off self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51959], ) # fmt: on self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on" " his chest." ), "timestamp": (22.56, 31.900000000000002), }, ], }, ) # Merge when the previous sequence is not included in the current sequence next_sequences_3 = [[50364, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50584, 50257]] # fmt: skip # {'text': ' His instant panic was followed by a small, sharp blow high on his chest.','timestamp': (0.0, 9.4)} merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 120_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51832], ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." ), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 29.36), }, ], }, ) # last case is when the sequence is not in the first next predicted start and end of timestamp next_sequences_3 = [ [50364, 2812, 9836, 14783, 390, 406, 3163, 1953, 466, 13, 50634, 50634, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 50934] ] # fmt: skip merge = _find_timestamp_sequence( [[previous_sequence, (480_000, 0, 0)], [next_sequences_3, (480_000, 167_000, 0)]], processor.tokenizer, processor.feature_extractor, max_source_positions, ) self.assertEqual( merge, [51492, 406, 3163, 1953, 466, 13, 51612, 51612, 2812, 9836, 14783, 390, 6263, 538, 257, 1359, 11, 8199, 6327, 1090, 322, 702, 7443, 13, 51912] ) # fmt: skip self.assertEqual( processor.decode(merge, output_offsets=True), { "text": ( " not worth thinking about. His instant panic was followed by a small, sharp blow high on his" " chest." 
), "offsets": [ {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (24.96, 30.96), }, ], }, ) @slow @require_torch def test_whisper_timestamp_prediction(self): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") array = np.concatenate( [ds[40]["audio"]["array"], ds[41]["audio"]["array"], ds[42]["audio"]["array"], ds[43]["audio"]["array"]] ) pipe = pipeline( model="openai/whisper-small", return_timestamps=True, ) output = pipe(ds[40]["audio"]) self.assertDictEqual( output, { "text": " A man said to the universe, Sir, I exist.", "chunks": [{"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 4.26)}], }, ) output = pipe(array, chunk_length_s=10) self.assertDictEqual( nested_simplify(output), { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment he wore, the " "cut" ), "timestamp": (5.5, 11.95), }, { "text": ( " on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him " "with" ), "timestamp": (11.95, 19.61), }, { "text": " the thousands of spectators, retrievality is not worth thinking about.", "timestamp": (19.61, 25.0), }, { "text": " His instant panic was followed by a small, sharp blow high on his chest.", "timestamp": (25.0, 29.4), }, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore, the cut on his chest still dripping blood, the ache of his " "overstrained eyes, even the soaring arena around him with the " "thousands of spectators, retrievality is not worth thinking about. " "His instant panic was followed by a small, sharp blow high on his " "chest." ), }, ) output = pipe(array) self.assertDictEqual( output, { "chunks": [ {"text": " A man said to the universe, Sir, I exist.", "timestamp": (0.0, 5.5)}, { "text": ( " Sweat covered Brion's body, trickling into the " "tight-loan cloth that was the only garment" ), "timestamp": (5.5, 10.18), }, {"text": " he wore.", "timestamp": (10.18, 11.68)}, {"text": " The cut on his chest still dripping blood.", "timestamp": (11.68, 14.92)}, {"text": " The ache of his overstrained eyes.", "timestamp": (14.92, 17.6)}, { "text": ( " Even the soaring arena around him with the thousands of spectators were trivialities" ), "timestamp": (17.6, 22.56), }, {"text": " not worth thinking about.", "timestamp": (22.56, 24.96)}, ], "text": ( " A man said to the universe, Sir, I exist. Sweat covered Brion's " "body, trickling into the tight-loan cloth that was the only garment " "he wore. The cut on his chest still dripping blood. The ache of his " "overstrained eyes. Even the soaring arena around him with the " "thousands of spectators were trivialities not worth thinking about." ), }, ) @slow @require_torch def test_whisper_word_timestamps_batched(self): pipe = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", chunk_length_s=3, return_timestamps="word", ) data = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = data[0]["audio"] # not the same output as test_simple_whisper_asr because of chunking EXPECTED_OUTPUT = { "text": " Mr. 
Quilder is the apostle of the middle classes and we are glad to welcome his gospel.", "chunks": [ {"text": " Mr.", "timestamp": (0.48, 0.96)}, {"text": " Quilder", "timestamp": (0.96, 1.24)}, {"text": " is", "timestamp": (1.24, 1.5)}, {"text": " the", "timestamp": (1.5, 1.72)}, {"text": " apostle", "timestamp": (1.72, 1.98)}, {"text": " of", "timestamp": (1.98, 2.32)}, {"text": " the", "timestamp": (2.32, 2.5)}, {"text": " middle", "timestamp": (2.5, 2.68)}, {"text": " classes", "timestamp": (2.68, 3.2)}, {"text": " and", "timestamp": (3.2, 3.56)}, {"text": " we", "timestamp": (3.56, 3.68)}, {"text": " are", "timestamp": (3.68, 3.8)}, {"text": " glad", "timestamp": (3.8, 4.1)}, {"text": " to", "timestamp": (4.1, 4.34)}, {"text": " welcome", "timestamp": (4.3, 4.6)}, {"text": " his", "timestamp": (4.6, 4.94)}, {"text": " gospel.", "timestamp": (4.94, 5.82)}, ], } # batch size 1: copy the audio sample since pipeline consumes it output = pipe(sample.copy(), batch_size=1) self.assertDictEqual(output, EXPECTED_OUTPUT) # batch size 2: input audio is chunked into smaller pieces so it's testing batching output = pipe(sample, batch_size=2) self.assertDictEqual(output, EXPECTED_OUTPUT) @require_torch @slow def test_torch_speech_encoder_decoder(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/s2t-wav2vec2-large-en-de", feature_extractor="facebook/s2t-wav2vec2-large-en-de", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": 'Ein Mann sagte zum Universum : " Sir, ich existiert! "'}) @slow @require_torch def test_simple_wav2vec2(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": ""}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) self.assertEqual(output, {"text": "A MAN SAID TO THE UNIVERSE SIR I EXIST"}) @slow @require_torch @require_torchaudio def test_simple_s2t(self): model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-mustc-en-it-st") tokenizer = AutoTokenizer.from_pretrained("facebook/s2t-small-mustc-en-it-st") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/s2t-small-mustc-en-it-st") asr = AutomaticSpeechRecognitionPipeline(model=model, tokenizer=tokenizer, feature_extractor=feature_extractor) waveform = np.tile(np.arange(1000, dtype=np.float32), 34) output = asr(waveform) self.assertEqual(output, {"text": "(Applausi)"}) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = asr(filename) self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) filename = ds[40]["file"] with open(filename, "rb") as f: data = f.read() output = asr(data) 
self.assertEqual(output, {"text": "Un uomo disse all'universo: \"Signore, io esisto."}) @slow @require_torch @require_torchaudio def test_simple_whisper_asr(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) output = speech_recognizer(filename, return_timestamps=True) self.assertEqual( output, { "text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.", "chunks": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 5.44), } ], }, ) speech_recognizer.model.generation_config.alignment_heads = [[2, 2], [3, 0], [3, 2], [3, 3], [3, 4], [3, 5]] output = speech_recognizer(filename, return_timestamps="word") # fmt: off self.assertEqual( output, { 'text': ' Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel.', 'chunks': [ {'text': ' Mr.', 'timestamp': (0.38, 1.04)}, {'text': ' Quilter', 'timestamp': (1.04, 1.18)}, {'text': ' is', 'timestamp': (1.18, 1.44)}, {'text': ' the', 'timestamp': (1.44, 1.58)}, {'text': ' apostle', 'timestamp': (1.58, 1.98)}, {'text': ' of', 'timestamp': (1.98, 2.32)}, {'text': ' the', 'timestamp': (2.32, 2.46)}, {'text': ' middle', 'timestamp': (2.46, 2.56)}, {'text': ' classes,', 'timestamp': (2.56, 3.4)}, {'text': ' and', 'timestamp': (3.4, 3.54)}, {'text': ' we', 'timestamp': (3.54, 3.62)}, {'text': ' are', 'timestamp': (3.62, 3.72)}, {'text': ' glad', 'timestamp': (3.72, 4.0)}, {'text': ' to', 'timestamp': (4.0, 4.26)}, {'text': ' welcome', 'timestamp': (4.26, 4.56)}, {'text': ' his', 'timestamp': (4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.84)} ] } ) # fmt: on # Whisper can only predict segment level timestamps or word level, not character level with self.assertRaisesRegex( ValueError, "^Whisper cannot return `char` timestamps, only word level or segment level timestamps. 
" "Use `return_timestamps='word'` or `return_timestamps=True` respectively.$", ): _ = speech_recognizer(filename, return_timestamps="char") @slow @require_torch @require_torchaudio def test_simple_whisper_translation(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-large", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": " A man said to the universe, Sir, I exist."}) model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") tokenizer = AutoTokenizer.from_pretrained("openai/whisper-large") feature_extractor = AutoFeatureExtractor.from_pretrained("openai/whisper-large") speech_recognizer_2 = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor ) output_2 = speech_recognizer_2(filename) self.assertEqual(output, output_2) # either use generate_kwargs or set the model's generation_config # model.generation_config.task = "transcribe" # model.generation_config.lang = "<|it|>" speech_translator = AutomaticSpeechRecognitionPipeline( model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, generate_kwargs={"task": "transcribe", "language": "<|it|>"}, ) output_3 = speech_translator(filename) self.assertEqual(output_3, {"text": " Un uomo ha detto all'universo, Sir, esiste."}) @slow @require_torch def test_whisper_language(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny.en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") filename = ds[0]["file"] # 1. English-only model compatible with no language argument output = speech_recognizer(filename) self.assertEqual( output, {"text": " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel."}, ) # 2. English-only Whisper does not accept the language argument with self.assertRaisesRegex( ValueError, "Cannot specify `task` or `language` for an English-only model. If the model is intended to be multilingual, " "pass `is_multilingual=True` to generate, or update the generation config.", ): _ = speech_recognizer(filename, generate_kwargs={"language": "en"}) # 3. Multilingual model accepts language argument speech_recognizer = pipeline( task="automatic-speech-recognition", model="openai/whisper-tiny", framework="pt", ) output = speech_recognizer(filename, generate_kwargs={"language": "en"}) self.assertEqual( output, {"text": " Mr. 
Quilter is the apostle of the middle classes and we are glad to welcome his gospel."}, ) @slow @require_torch @require_torchaudio def test_xls_r_to_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-21-to-en", feature_extractor="facebook/wav2vec2-xls-r-1b-21-to-en", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "A man said to the universe: “Sir, I exist."}) @slow @require_torch @require_torchaudio def test_xls_r_from_en(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-xls-r-1b-en-to-15", feature_extractor="facebook/wav2vec2-xls-r-1b-en-to-15", framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "Ein Mann sagte zu dem Universum, Sir, ich bin da."}) @slow @require_torch @require_torchaudio def test_speech_to_text_leveraged(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-2-bart-base", feature_extractor="patrickvonplaten/wav2vec2-2-bart-base", tokenizer=AutoTokenizer.from_pretrained("patrickvonplaten/wav2vec2-2-bart-base"), framework="pt", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") filename = ds[40]["file"] output = speech_recognizer(filename) self.assertEqual(output, {"text": "a man said to the universe sir i exist"}) @slow @require_torch_accelerator def test_wav2vec2_conformer_float16(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="facebook/wav2vec2-conformer-rope-large-960h-ft", device=torch_device, torch_dtype=torch.float16, framework="pt", ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] output = speech_recognizer(sample) self.assertEqual( output, {"text": "MISTER QUILTER IS THE APOSTLE OF THE MIDDLE CLASSES AND WE ARE GLAD TO WELCOME HIS GOSPEL"}, ) @require_torch def test_chunking_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "ZBT ZC") @require_torch def test_return_timestamps_ctc_fast(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") # Take short audio to keep the test readable audio = ds[40]["audio"]["array"][:800] output = speech_recognizer(audio, return_timestamps="char") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": " ", "timestamp": (0.0, 0.012)}, {"text": "Z", "timestamp": (0.012, 0.016)}, {"text": "B", "timestamp": (0.016, 0.02)}, {"text": "T", "timestamp": (0.02, 0.024)}, {"text": " ", "timestamp": (0.024, 0.028)}, {"text": "Z", "timestamp": (0.028, 0.032)}, {"text": "X", 
"timestamp": (0.032, 0.036)}, {"text": " ", "timestamp": (0.036, 0.04)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "ZBT ZX G", "chunks": [ {"text": "ZBT", "timestamp": (0.012, 0.024)}, {"text": "ZX", "timestamp": (0.028, 0.036)}, {"text": "G", "timestamp": (0.04, 0.044)}, ], }, ) @require_torch @require_pyctcdecode def test_chunking_fast_with_lm(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) # Batch_size = 1 output1 = speech_recognizer([audio_tiled], batch_size=1) self.assertEqual(output1, [{"text": ANY(str)}]) self.assertEqual(output1[0]["text"][:6], "<s> <s") # batch_size = 2 output2 = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output2, [{"text": ANY(str)}]) self.assertEqual(output2[0]["text"][:6], "<s> <s") # TODO There is an offby one error because of the ratio. # Maybe logits get affected by the padding on this random # model is more likely. Add some masking ? # self.assertEqual(output1, output2) @require_torch @require_pyctcdecode def test_with_lm_fast(self): speech_recognizer = pipeline( model="hf-internal-testing/processor_with_lm", ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") # Making sure the argument are passed to the decoder # Since no change happens in the result, check the error comes from # the `decode_beams` function. with self.assertRaises(TypeError) as e: output = speech_recognizer([audio_tiled], decoder_kwargs={"num_beams": 2}) self.assertContains(e.msg, "TypeError: decode_beams() got an unexpected keyword argument 'num_beams'") output = speech_recognizer([audio_tiled], decoder_kwargs={"beam_width": 2}) @require_torch @require_pyctcdecode def test_with_local_lm_fast(self): local_dir = snapshot_download("hf-internal-testing/processor_with_lm") speech_recognizer = pipeline( task="automatic-speech-recognition", model=local_dir, ) self.assertEqual(speech_recognizer.type, "ctc_with_lm") ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 2 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ANY(str)}]) self.assertEqual(output[0]["text"][:6], "<s> <s") @require_torch @slow def test_whisper_longform(self): # fmt: off EXPECTED_RESULT = " Folks, if you watch the show, you know, I spent a lot of time right over there. 
Patiently and astutely scrutinizing the boxwood and mahogany chest set of the day's biggest stories developing the central headline pawns, definitely maneuvering an oso topical night to F6, fainting a classic Sicilian, nade door variation on the news, all the while seeing eight moves deep and patiently marshalling the latest press releases into a fisher's shows in Lip Nitsky attack that culminates in the elegant lethal slow-played, all-passant checkmate that is my nightly monologue. But sometimes, sometimes, folks, I. CHEERING AND APPLAUSE Sometimes I startle away, cubside down in the monkey bars of a condemned playground on a super fun site. Get all hept up on goofballs. Rummage that were discarded tag bag of defective toys. Yank out a fist bowl of disembodied doll limbs, toss them on Saturday, Rusty Cargo, container down by the Wharf, and challenge toothless drifters to the godless bughouse lets of tournament that is my segment. MUSIC Meanwhile!" # fmt: on processor = AutoProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model = model.to(torch_device) pipe = pipeline( "automatic-speech-recognition", model=model, tokenizer=processor.tokenizer, feature_extractor=processor.feature_extractor, max_new_tokens=128, device=torch_device, ) ds = load_dataset("distil-whisper/meanwhile", "default")["test"] ds = ds.cast_column("audio", Audio(sampling_rate=16000)) audio = ds[:1]["audio"] result = pipe(audio)[0]["text"] assert result == EXPECTED_RESULT @require_torch @slow def test_seamless_v2(self): pipe = pipeline( "automatic-speech-recognition", model="facebook/seamless-m4t-v2-large", device=torch_device, ) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") sample = dataset[0]["audio"] result = pipe(sample, generate_kwargs={"tgt_lang": "eng"}) EXPECTED_RESULT = "mister quilter is the apostle of the middle classes and we are glad to welcome his gospel" assert result["text"] == EXPECTED_RESULT @require_torch @slow def test_chunking_and_timestamps(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") tokenizer = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h") feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") speech_recognizer = pipeline( task="automatic-speech-recognition", model=model, tokenizer=tokenizer, feature_extractor=feature_extractor, framework="pt", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio_tiled = np.tile(audio, n_repeats) output = speech_recognizer([audio_tiled], batch_size=2) self.assertEqual(output, [{"text": ("A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats).strip()}]) output = speech_recognizer(audio, return_timestamps="char") self.assertEqual(audio.shape, (74_400,)) self.assertEqual(speech_recognizer.feature_extractor.sampling_rate, 16_000) # The audio is 74_400 / 16_000 = 4.65s long. 
self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": " ", "timestamp": (0.62, 0.66)}, {"text": "M", "timestamp": (0.68, 0.7)}, {"text": "A", "timestamp": (0.78, 0.8)}, {"text": "N", "timestamp": (0.84, 0.86)}, {"text": " ", "timestamp": (0.92, 0.98)}, {"text": "S", "timestamp": (1.06, 1.08)}, {"text": "A", "timestamp": (1.14, 1.16)}, {"text": "I", "timestamp": (1.16, 1.18)}, {"text": "D", "timestamp": (1.2, 1.24)}, {"text": " ", "timestamp": (1.24, 1.28)}, {"text": "T", "timestamp": (1.28, 1.32)}, {"text": "O", "timestamp": (1.34, 1.36)}, {"text": " ", "timestamp": (1.38, 1.42)}, {"text": "T", "timestamp": (1.42, 1.44)}, {"text": "H", "timestamp": (1.44, 1.46)}, {"text": "E", "timestamp": (1.46, 1.5)}, {"text": " ", "timestamp": (1.5, 1.56)}, {"text": "U", "timestamp": (1.58, 1.62)}, {"text": "N", "timestamp": (1.64, 1.68)}, {"text": "I", "timestamp": (1.7, 1.72)}, {"text": "V", "timestamp": (1.76, 1.78)}, {"text": "E", "timestamp": (1.84, 1.86)}, {"text": "R", "timestamp": (1.86, 1.9)}, {"text": "S", "timestamp": (1.96, 1.98)}, {"text": "E", "timestamp": (1.98, 2.02)}, {"text": " ", "timestamp": (2.02, 2.06)}, {"text": "S", "timestamp": (2.82, 2.86)}, {"text": "I", "timestamp": (2.94, 2.96)}, {"text": "R", "timestamp": (2.98, 3.02)}, {"text": " ", "timestamp": (3.06, 3.12)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": " ", "timestamp": (3.58, 3.6)}, {"text": "E", "timestamp": (3.66, 3.68)}, {"text": "X", "timestamp": (3.68, 3.7)}, {"text": "I", "timestamp": (3.9, 3.92)}, {"text": "S", "timestamp": (3.94, 3.96)}, {"text": "T", "timestamp": (4.0, 4.02)}, {"text": " ", "timestamp": (4.06, 4.1)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word") self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.28, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.5)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, {"text": "SIR", "timestamp": (2.82, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) output = speech_recognizer(audio, return_timestamps="word", chunk_length_s=2.0) self.assertEqual( output, { "text": "A MAN SAID TO THE UNIVERSE SIR I EXIST", "chunks": [ {"text": "A", "timestamp": (0.6, 0.62)}, {"text": "MAN", "timestamp": (0.68, 0.86)}, {"text": "SAID", "timestamp": (1.06, 1.24)}, {"text": "TO", "timestamp": (1.3, 1.36)}, {"text": "THE", "timestamp": (1.42, 1.48)}, {"text": "UNIVERSE", "timestamp": (1.58, 2.02)}, # Tiny change linked to chunking. {"text": "SIR", "timestamp": (2.84, 3.02)}, {"text": "I", "timestamp": (3.5, 3.52)}, {"text": "EXIST", "timestamp": (3.66, 4.02)}, ], }, ) # CTC models must specify return_timestamps type - cannot set `return_timestamps=True` blindly with self.assertRaisesRegex( ValueError, "^CTC can either predict character level timestamps, or word level timestamps. 
" "Set `return_timestamps='char'` or `return_timestamps='word'` as required.$", ): _ = speech_recognizer(audio, return_timestamps=True) @require_torch @slow def test_chunking_with_lm(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="patrickvonplaten/wav2vec2-base-100h-with-lm", chunk_length_s=10.0, ) ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] n_repeats = 10 audio = np.tile(audio, n_repeats) output = speech_recognizer([audio], batch_size=2) expected_text = "A MAN SAID TO THE UNIVERSE SIR I EXIST " * n_repeats expected = [{"text": expected_text.strip()}] self.assertEqual(output, expected) @require_torch def test_chunk_iterator(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() outs = list(chunk_iter(inputs, feature_extractor, 100, 0, 0)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) # two chunks no stride outs = list(chunk_iter(inputs, feature_extractor, 50, 0, 0)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(50, 0, 0), (50, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 50), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # two chunks incomplete last outs = list(chunk_iter(inputs, feature_extractor, 80, 0, 0)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 0), (20, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) # one chunk since first is also last, because it contains only data # in the right strided part we just mark that part as non stride # This test is specifically crafted to trigger a bug if next chunk # would be ignored by the fact that all the data would be # contained in the strided left data. 
outs = list(chunk_iter(inputs, feature_extractor, 105, 5, 5)) self.assertEqual(len(outs), 1) self.assertEqual([o["stride"] for o in outs], [(100, 0, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100)]) self.assertEqual([o["is_last"] for o in outs], [True]) @require_torch def test_chunk_iterator_stride(self): feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h") inputs = torch.arange(100).long() input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] outs = list(chunk_iter(inputs, feature_extractor, 100, 20, 10)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(100, 0, 10), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 100), (1, 30)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 80, 20, 10)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(80, 0, 10), (50, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 80), (1, 50)]) self.assertEqual([o["is_last"] for o in outs], [False, True]) outs = list(chunk_iter(inputs, feature_extractor, 90, 20, 0)) self.assertEqual(len(outs), 2) self.assertEqual([o["stride"] for o in outs], [(90, 0, 0), (30, 20, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 90), (1, 30)]) outs = list(chunk_iter(inputs, feature_extractor, 36, 6, 6)) self.assertEqual(len(outs), 4) self.assertEqual([o["stride"] for o in outs], [(36, 0, 6), (36, 6, 6), (36, 6, 6), (28, 6, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 36), (1, 36), (1, 36), (1, 28)]) inputs = torch.LongTensor([i % 2 for i in range(100)]) input_values = feature_extractor(inputs, sampling_rate=feature_extractor.sampling_rate, return_tensors="pt")[ "input_values" ] outs = list(chunk_iter(inputs, feature_extractor, 30, 5, 5)) self.assertEqual(len(outs), 5) self.assertEqual([o["stride"] for o in outs], [(30, 0, 5), (30, 5, 5), (30, 5, 5), (30, 5, 5), (20, 5, 0)]) self.assertEqual([o["input_values"].shape for o in outs], [(1, 30), (1, 30), (1, 30), (1, 30), (1, 20)]) self.assertEqual([o["is_last"] for o in outs], [False, False, False, False, True]) # (0, 25) self.assertEqual(nested_simplify(input_values[:, :30]), nested_simplify(outs[0]["input_values"])) # (25, 45) self.assertEqual(nested_simplify(input_values[:, 20:50]), nested_simplify(outs[1]["input_values"])) # (45, 65) self.assertEqual(nested_simplify(input_values[:, 40:70]), nested_simplify(outs[2]["input_values"])) # (65, 85) self.assertEqual(nested_simplify(input_values[:, 60:90]), nested_simplify(outs[3]["input_values"])) # (85, 100) self.assertEqual(nested_simplify(input_values[:, 80:100]), nested_simplify(outs[4]["input_values"])) @require_torch def test_stride(self): speech_recognizer = pipeline( task="automatic-speech-recognition", model="hf-internal-testing/tiny-random-wav2vec2", ) waveform = np.tile(np.arange(1000, dtype=np.float32), 10) output = speech_recognizer({"raw": waveform, "stride": (0, 0), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB XB B EB BB B EB B OB X"}) # 0 effective ids Just take the middle one output = speech_recognizer({"raw": waveform, "stride": (5000, 5000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": ""}) # Only 1 arange. 
output = speech_recognizer({"raw": waveform, "stride": (0, 9000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "OB"}) # 2nd arange output = speech_recognizer({"raw": waveform, "stride": (1000, 8000), "sampling_rate": 16_000}) self.assertEqual(output, {"text": "XB"}) @slow @require_torch_accelerator def test_slow_unfinished_sequence(self): from transformers import GenerationConfig pipe = pipeline( "automatic-speech-recognition", model="vasista22/whisper-hindi-large-v2", device=torch_device, ) # Original model wasn't trained with timestamps and has incorrect generation config pipe.model.generation_config = GenerationConfig.from_pretrained("openai/whisper-large-v2") # the audio is 4 seconds long audio = hf_hub_download("Narsil/asr_dummy", filename="hindi.ogg", repo_type="dataset") out = pipe( audio, return_timestamps=True, ) self.assertEqual( out, { "text": "मिर्ची में कितने विभिन्न प्रजातियां हैं", "chunks": [{"timestamp": (0.58, None), "text": "मिर्ची में कितने विभिन्न प्रजातियां हैं"}], }, ) def require_ffmpeg(test_case): """ Decorator marking a test that requires FFmpeg. These tests are skipped when FFmpeg isn't installed. """ import subprocess try: subprocess.check_output(["ffmpeg", "-h"], stderr=subprocess.DEVNULL) return test_case except Exception: return unittest.skip("test requires ffmpeg")(test_case) def bytes_iter(chunk_size, chunks): for i in range(chunks): yield bytes(range(i * chunk_size, (i + 1) * chunk_size)) @require_ffmpeg class AudioUtilsTest(unittest.TestCase): def test_chunk_bytes_iter_too_big(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 10, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(0, 0))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0)}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (0, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 3, stride=(1, 1))) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 1)}) self.assertEqual(next(iter_), {"raw": b"\x01\x02\x03", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x02\x03\x04", "stride": (1, 1)}) # This is finished, but the chunk_bytes doesn't know it yet. 
self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 1)}) self.assertEqual(next(iter_), {"raw": b"\x04\x05", "stride": (1, 0)}) with self.assertRaises(StopIteration): next(iter_) def test_chunk_bytes_iter_stride_stream(self): iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=2), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 5, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04", "stride": (0, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x03\x04\x05\x06\x07", "stride": (1, 1), "partial": False}) self.assertEqual(next(iter_), {"raw": b"\x06\x07\x08", "stride": (1, 0), "partial": False}) with self.assertRaises(StopIteration): next(iter_) iter_ = iter(chunk_bytes_iter(bytes_iter(chunk_size=3, chunks=3), 10, stride=(1, 1), stream=True)) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02", "stride": (0, 0), "partial": True}) self.assertEqual(next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05", "stride": (0, 0), "partial": True}) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": True} ) self.assertEqual( next(iter_), {"raw": b"\x00\x01\x02\x03\x04\x05\x06\x07\x08", "stride": (0, 0), "partial": False} ) with self.assertRaises(StopIteration): next(iter_)
# File: mavonic_private_repos/transformers/tests/pipelines/test_pipelines_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, LxmertConfig, QuestionAnsweringPipeline, ) from transformers.data.processors.squad import SquadExample from transformers.pipelines import QuestionAnsweringArgumentHandler, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_torch_or_tf, slow, ) from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. _TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"} @is_pipeline_test class QAPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_QUESTION_ANSWERING_MAPPING tf_model_mapping = TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING if model_mapping is not None: model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: tf_model_mapping = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def get_test_pipeline(self, model, tokenizer, processor): if isinstance(model.config, LxmertConfig): # This is an bimodal model, we need to find a more consistent way # to switch on those models. return None, None question_answerer = QuestionAnsweringPipeline(model, tokenizer) examples = [ {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."}, {"question": "In what field is HuggingFace ?", "context": "HuggingFace is an AI startup."}, ] return question_answerer, examples def run_pipeline_test(self, question_answerer, _): outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", handle_impossible_answer=True, ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) outputs = question_answerer( question=["In what field is HuggingFace working ?", "In what field is HuggingFace working ?"], context="HuggingFace was founded in Paris.", ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) outputs = question_answerer( question=["What field is HuggingFace working ?", "In what field is HuggingFace ?"], context=[ "HuggingFace is a startup based in New-York", "HuggingFace is a startup founded in Paris", ], ) self.assertEqual( outputs, [ {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}, ], ) with self.assertRaises(ValueError): question_answerer(question="", context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question=None, context="HuggingFace was founded in Paris.") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context="") with self.assertRaises(ValueError): question_answerer(question="In what field is HuggingFace working ?", context=None) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris.", top_k=20 ) self.assertEqual( outputs, [{"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)} for i in range(20)] ) # Very long context require multiple features outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20 ) self.assertEqual(outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) # Using batch is OK if question_answerer.tokenizer.pad_token_id is None: question_answerer.tokenizer.pad_token_id = question_answerer.model.config.eos_token_id new_outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." * 20, batch_size=2 ) self.assertEqual(new_outputs, {"answer": ANY(str), "start": ANY(int), "end": ANY(int), "score": ANY(float)}) self.assertEqual(nested_simplify(outputs), nested_simplify(new_outputs)) @require_torch def test_small_model_pt(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_iterator(self): # https://github.com/huggingface/transformers/issues/18510 pipe = pipeline(model="sshleifer/tiny-distilbert-base-cased-distilled-squad", batch_size=16, framework="pt") def data(): for i in range(10): yield {"question": "Where was HuggingFace founded ?", "context": "HuggingFace was founded in Paris."} for outputs in pipe(data()): self.assertEqual(nested_simplify(outputs), {"score": 0.01, "start": 0, "end": 11, "answer": "HuggingFace"}) @require_torch def test_small_model_pt_softmax_trick(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad" ) real_postprocess = question_answerer.postprocess # Tweak start and stop to make sure we encounter the softmax logits # bug. def ensure_large_logits_postprocess( model_outputs, top_k=1, handle_impossible_answer=False, max_answer_len=15, ): for output in model_outputs: output["start"] = output["start"] * 1e6 output["end"] = output["end"] * 1e6 return real_postprocess( model_outputs, top_k=top_k, handle_impossible_answer=handle_impossible_answer, max_answer_len=max_answer_len, ) question_answerer.postprocess = ensure_large_logits_postprocess outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.028, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_small_model_japanese(self): question_answerer = pipeline( "question-answering", model="KoichiYasuoka/deberta-base-japanese-aozora-ud-head", ) output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている") # fmt: skip # Wrong answer, the whole text is identified as one "word" since the tokenizer does not include # a pretokenizer self.assertEqual(nested_simplify(output),{"score": 1.0, "start": 0, "end": 30, "answer": "全学年にわたって小学校の国語の教科書に挿し絵が用いられている"}) # fmt: skip # Disable word alignment output = question_answerer(question="国語", context="全学年にわたって小学校の国語の教科書に挿し絵が用いられている", align_to_words=False) # fmt: skip self.assertEqual( nested_simplify(output), {"score": 1.0, "start": 15, "end": 18, "answer": "教科書"}, ) @slow @require_torch def test_small_model_long_context_cls_slow(self): question_answerer = pipeline( "question-answering", model="deepset/roberta-base-squad2", handle_impossible_answer=True, max_seq_length=512, ) outputs = question_answerer( question="What country is Paris the capital of?", context="""London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. 
As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games. London is the capital and largest city of England and the United Kingdom. It stands on the River Thames in south-east England at the head of a 50-mile (80 km) estuary down to the North Sea, and has been a major settlement for two millennia. The City of London, its ancient core and financial centre, was founded by the Romans as Londinium and retains boundaries close to its medieval ones. Since the 19th century, \"London\" has also referred to the metropolis around this core, historically split between the counties of Middlesex, Essex, Surrey, Kent, and Hertfordshire, which largely comprises Greater London, governed by the Greater London Authority. The City of Westminster, to the west of the City of London, has for centuries held the national government and parliament. As one of the world's global cities, London exerts strong influence on its arts, commerce, education, entertainment, fashion, finance, health care, media, tourism, and communications, and has sometimes been called the capital of the world. Its GDP (€801.66 billion in 2017) makes it the biggest urban economy in Europe, and it is one of the major financial centres in the world. In 2019 it had the second-highest number of ultra high-net-worth individuals in Europe after Paris and the second-highest number of billionaires in Europe after Moscow. As of 2021, London has the most millionaires of any city. With Europe's largest concentration of higher education institutions, it includes Imperial College London in natural and applied sciences, the London School of Economics in social sciences, and the comprehensive University College London. The city is home to the most 5-star hotels of any city in the world. In 2012, London became the first city to host three Summer Olympic Games.""", ) self.assertEqual(nested_simplify(outputs), {"score": 0.988, "start": 0, "end": 0, "answer": ""}) @require_tf def test_small_model_tf(self): question_answerer = pipeline( "question-answering", model="sshleifer/tiny-distilbert-base-cased-distilled-squad", framework="tf" ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.011, "start": 0, "end": 11, "answer": "HuggingFace"}) @slow @require_torch def test_large_model_pt(self): question_answerer = pipeline( "question-answering", ) outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." 
) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @slow @require_torch def test_large_model_issue(self): qa_pipeline = pipeline( "question-answering", model="mrm8488/bert-multi-cased-finetuned-xquadv1", ) outputs = qa_pipeline( { "context": ( "Yes Bank founder Rana Kapoor has approached the Bombay High Court, challenging a special court's" " order from August this year that had remanded him in police custody for a week in a multi-crore" " loan fraud case. Kapoor, who is currently lodged in Taloja Jail, is an accused in the loan fraud" " case and some related matters being probed by the CBI and Enforcement Directorate. A single" " bench presided over by Justice S K Shinde on Tuesday posted the plea for further hearing on" " October 14. In his plea filed through advocate Vijay Agarwal, Kapoor claimed that the special" " court's order permitting the CBI's request for police custody on August 14 was illegal and in" " breach of the due process of law. Therefore, his police custody and subsequent judicial custody" " in the case were all illegal. Kapoor has urged the High Court to quash and set aside the special" " court's order dated August 14. As per his plea, in August this year, the CBI had moved two" " applications before the special court, one seeking permission to arrest Kapoor, who was already" " in judicial custody at the time in another case, and the other, seeking his police custody." " While the special court refused to grant permission to the CBI to arrest Kapoor, it granted the" " central agency's plea for his custody. Kapoor, however, said in his plea that before filing an" " application for his arrest, the CBI had not followed the process of issuing him a notice under" " Section 41 of the CrPC for appearance before it. He further said that the CBI had not taken" " prior sanction as mandated under section 17 A of the Prevention of Corruption Act for" " prosecuting him. The special court, however, had said in its order at the time that as Kapoor" " was already in judicial custody in another case and was not a free man the procedure mandated" " under Section 41 of the CrPC need not have been adhered to as far as issuing a prior notice of" " appearance was concerned. ADVERTISING It had also said that case records showed that the" " investigating officer had taken an approval from a managing director of Yes Bank before" " beginning the proceedings against Kapoor and such a permission was a valid sanction. However," " Kapoor in his plea said that the above order was bad in law and sought that it be quashed and" " set aside. The law mandated that if initial action was not in consonance with legal procedures," " then all subsequent actions must be held as illegal, he said, urging the High Court to declare" " the CBI remand and custody and all subsequent proceedings including the further custody as" " illegal and void ab-initio. In a separate plea before the High Court, Kapoor's daughter Rakhee" " Kapoor-Tandon has sought exemption from in-person appearance before a special PMLA court. Rakhee" " has stated that she is a resident of the United Kingdom and is unable to travel to India owing" " to restrictions imposed due to the COVID-19 pandemic. 
According to the CBI, in the present case," " Kapoor had obtained a gratification or pecuniary advantage of ₹ 307 crore, and thereby caused" " Yes Bank a loss of ₹ 1,800 crore by extending credit facilities to Avantha Group, when it was" " not eligible for the same" ), "question": "Is this person invovled in fraud?", } ) self.assertEqual( nested_simplify(outputs), {"answer": "an accused in the loan fraud case", "end": 294, "score": 0.001, "start": 261}, ) @slow @require_torch def test_large_model_course(self): question_answerer = pipeline("question-answering") long_context = """ 🤗 Transformers: State of the Art NLP 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question = "Which deep learning libraries back 🤗 Transformers?" outputs = question_answerer(question=question, context=long_context) self.assertEqual( nested_simplify(outputs), {"answer": "Jax, PyTorch and TensorFlow", "end": 1919, "score": 0.971, "start": 1892}, ) @slow @require_tf def test_large_model_tf(self): question_answerer = pipeline("question-answering", framework="tf") outputs = question_answerer( question="Where was HuggingFace founded ?", context="HuggingFace was founded in Paris." ) self.assertEqual(nested_simplify(outputs), {"score": 0.979, "start": 27, "end": 32, "answer": "Paris"}) @require_torch_or_tf class QuestionAnsweringArgumentHandlerTests(unittest.TestCase): def test_argument_handler(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" normalized = qa(Q, C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=Q, context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(question=[Q, Q], context=C) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa({"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa([{"question": Q, "context": C}, {"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(X=[{"question": Q, "context": C}]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) normalized = qa(data={"question": Q, "context": C}) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 1) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" 
C = "HuggingFace was founded in Paris" with self.assertRaises(KeyError): qa({"context": C}) with self.assertRaises(KeyError): qa({"question": Q}) with self.assertRaises(KeyError): qa([{"context": C}]) with self.assertRaises(ValueError): qa(None, C) with self.assertRaises(ValueError): qa("", C) with self.assertRaises(ValueError): qa(Q, None) with self.assertRaises(ValueError): qa(Q, "") with self.assertRaises(ValueError): qa(question=None, context=C) with self.assertRaises(ValueError): qa(question="", context=C) with self.assertRaises(ValueError): qa(question=Q, context=None) with self.assertRaises(ValueError): qa(question=Q, context="") with self.assertRaises(ValueError): qa({"question": None, "context": C}) with self.assertRaises(ValueError): qa({"question": "", "context": C}) with self.assertRaises(ValueError): qa({"question": Q, "context": None}) with self.assertRaises(ValueError): qa({"question": Q, "context": ""}) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": None, "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": "", "context": C}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": None}]) with self.assertRaises(ValueError): qa([{"question": Q, "context": C}, {"question": Q, "context": ""}]) with self.assertRaises(ValueError): qa(question={"This": "Is weird"}, context="This is a context") with self.assertRaises(ValueError): qa(question=[Q, Q], context=[C, C, C]) with self.assertRaises(ValueError): qa(question=[Q, Q, Q], context=[C, C]) def test_argument_handler_old_format(self): qa = QuestionAnsweringArgumentHandler() Q = "Where was HuggingFace founded ?" C = "HuggingFace was founded in Paris" # Backward compatibility for this normalized = qa(question=[Q, Q], context=[C, C]) self.assertEqual(type(normalized), list) self.assertEqual(len(normalized), 2) self.assertEqual({type(el) for el in normalized}, {SquadExample}) def test_argument_handler_error_handling_odd(self): qa = QuestionAnsweringArgumentHandler() with self.assertRaises(ValueError): qa(None) with self.assertRaises(ValueError): qa(Y=None) with self.assertRaises(ValueError): qa(1)
# File: mavonic_private_repos/transformers/tests/pipelines/test_pipelines_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import logging import os import sys import tempfile import unittest from pathlib import Path import datasets import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, DistilBertForSequenceClassification, TextClassificationPipeline, TFAutoModelForSequenceClassification, pipeline, ) from transformers.pipelines import PIPELINE_REGISTRY, get_task from transformers.pipelines.base import Pipeline, _pad from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, RequestCounter, backend_empty_cache, is_pipeline_test, is_staging_test, nested_simplify, require_tensorflow_probability, require_tf, require_torch, require_torch_accelerator, require_torch_or_tf, slow, torch_device, ) from transformers.utils import direct_transformers_import, is_tf_available, is_torch_available from transformers.utils import logging as transformers_logging sys.path.append(str(Path(__file__).parent.parent.parent / "utils")) from test_module.custom_pipeline import PairClassificationPipeline # noqa E402 logger = logging.getLogger(__name__) PATH_TO_TRANSFORMERS = os.path.join(Path(__file__).parent.parent.parent, "src/transformers") # Dynamically import the Transformers module to grab the attribute classes of the processor form their names. 
transformers_module = direct_transformers_import(PATH_TO_TRANSFORMERS) class ANY: def __init__(self, *_types): self._types = _types def __eq__(self, other): return isinstance(other, self._types) def __repr__(self): return f"ANY({', '.join(_type.__name__ for _type in self._types)})" @is_pipeline_test class CommonPipelineTest(unittest.TestCase): @require_torch def test_pipeline_iteration(self): from torch.utils.data import Dataset class MyDataset(Dataset): data = [ "This is a test", "This restaurant is great", "This restaurant is awful", ] def __len__(self): return 3 def __getitem__(self, i): return self.data[i] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) dataset = MyDataset() for output in text_classifier(dataset): self.assertEqual(output, {"label": ANY(str), "score": ANY(float)}) @require_torch def test_check_task_auto_inference(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertIsInstance(pipe, TextClassificationPipeline) @require_torch def test_pipeline_batch_size_global(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") self.assertEqual(pipe._batch_size, None) self.assertEqual(pipe._num_workers, None) pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", batch_size=2, num_workers=1) self.assertEqual(pipe._batch_size, 2) self.assertEqual(pipe._num_workers, 1) @require_torch def test_pipeline_pathlike(self): pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") with tempfile.TemporaryDirectory() as d: pipe.save_pretrained(d) path = Path(d) newpipe = pipeline(task="text-classification", model=path) self.assertIsInstance(newpipe, TextClassificationPipeline) @require_torch def test_pipeline_override(self): class MyPipeline(TextClassificationPipeline): pass text_classifier = pipeline(model="hf-internal-testing/tiny-random-distilbert", pipeline_class=MyPipeline) self.assertIsInstance(text_classifier, MyPipeline) def test_check_task(self): task = get_task("openai-community/gpt2") self.assertEqual(task, "text-generation") with self.assertRaises(RuntimeError): # Wrong framework get_task("espnet/siddhana_slurp_entity_asr_train_asr_conformer_raw_en_word_valid.acc.ave_10best") @require_torch def test_iterator_data(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) # When using multiple workers on streamable data it should still work # This will force using `num_workers=1` with a warning for now. 
results = [] for out in pipe(data(10), num_workers=2): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_tf def test_iterator_data_tf(self): def data(n: int): for _ in range(n): yield "This is a test" pipe = pipeline(model="hf-internal-testing/tiny-random-distilbert", framework="tf") out = pipe("This is a test") results = [] for out in pipe(data(10)): self.assertEqual(nested_simplify(out), {"label": "LABEL_0", "score": 0.504}) results.append(out) self.assertEqual(len(results), 10) @require_torch def test_unbatch_attentions_hidden_states(self): model = DistilBertForSequenceClassification.from_pretrained( "hf-internal-testing/tiny-random-distilbert", output_hidden_states=True, output_attentions=True ) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-distilbert") text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer) # Used to throw an error because `hidden_states` are a tuple of tensors # instead of the expected tensor. outputs = text_classifier(["This is great !"] * 20, batch_size=32) self.assertEqual(len(outputs), 20) @require_torch def test_torch_dtype_property(self): import torch model_id = "hf-internal-testing/tiny-random-distilbert" # If dtype is specified in the pipeline constructor, the property should return that type pipe = pipeline(model=model_id, torch_dtype=torch.float16) self.assertEqual(pipe.torch_dtype, torch.float16) # If the underlying model changes dtype, the property should return the new type pipe.model.to(torch.bfloat16) self.assertEqual(pipe.torch_dtype, torch.bfloat16) # If dtype is NOT specified in the pipeline constructor, the property should just return # the dtype of the underlying model (default) pipe = pipeline(model=model_id) self.assertEqual(pipe.torch_dtype, torch.float32) # If underlying model doesn't have dtype property, simply return None pipe.model = None self.assertIsNone(pipe.torch_dtype) @is_pipeline_test class PipelineScikitCompatTest(unittest.TestCase): @require_torch def test_pipeline_predict_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_predict_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.predict(data) self.assertEqual(expected_output, actual_output) @require_torch def test_pipeline_transform_pt(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @require_tf def test_pipeline_transform_tf(self): data = ["This is a test"] text_classifier = pipeline( task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) expected_output = [{"label": ANY(str), "score": ANY(float)}] actual_output = text_classifier.transform(data) self.assertEqual(expected_output, actual_output) @is_pipeline_test class 
PipelinePadTest(unittest.TestCase): @require_torch def test_pipeline_padding(self): import torch items = [ { "label": "label1", "input_ids": torch.LongTensor([[1, 23, 24, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 0]]), }, { "label": "label2", "input_ids": torch.LongTensor([[1, 23, 24, 43, 44, 2]]), "attention_mask": torch.LongTensor([[0, 1, 1, 1, 1, 0]]), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "right"), torch.LongTensor([[1, 23, 24, 2, 10, 10], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "input_ids", 10, "left"), torch.LongTensor([[10, 10, 1, 23, 24, 2], [1, 23, 24, 43, 44, 2]]), ) ) self.assertTrue( torch.allclose( _pad(items, "attention_mask", 0, "right"), torch.LongTensor([[0, 1, 1, 0, 0, 0], [0, 1, 1, 1, 1, 0]]) ) ) @require_torch def test_pipeline_image_padding(self): import torch items = [ { "label": "label1", "pixel_values": torch.zeros((1, 3, 10, 10)), }, { "label": "label2", "pixel_values": torch.zeros((1, 3, 10, 10)), }, ] self.assertEqual(_pad(items, "label", 0, "right"), ["label1", "label2"]) self.assertTrue( torch.allclose( _pad(items, "pixel_values", 10, "right"), torch.zeros((2, 3, 10, 10)), ) ) @require_torch def test_pipeline_offset_mapping(self): import torch items = [ { "offset_mappings": torch.zeros([1, 11, 2], dtype=torch.long), }, { "offset_mappings": torch.zeros([1, 4, 2], dtype=torch.long), }, ] self.assertTrue( torch.allclose( _pad(items, "offset_mappings", 0, "right"), torch.zeros((2, 11, 2), dtype=torch.long), ), ) @is_pipeline_test class PipelineUtilsTest(unittest.TestCase): @require_torch def test_pipeline_dataset(self): from transformers.pipelines.pt_utils import PipelineDataset dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineDataset(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = [dataset[i] for i in range(4)] self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [0, 1, 2, 3] def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}) self.assertEqual(len(dataset), 4) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_iterator_no_len(self): from transformers.pipelines.pt_utils import PipelineIterator def dummy_dataset(): for i in range(4): yield i def add(number, extra=0): return number + extra dataset = PipelineIterator(dummy_dataset(), add, {"extra": 2}) with self.assertRaises(TypeError): len(dataset) outputs = list(dataset) self.assertEqual(outputs, [2, 3, 4, 5]) @require_torch def test_pipeline_batch_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": [0, 1, 2]}, {"id": [3]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]]} dataset = PipelineIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]) @require_torch def test_pipeline_batch_unbatch_iterator_tensors(self): import torch from transformers.pipelines.pt_utils import PipelineIterator dummy_dataset = [{"id": torch.LongTensor([[10, 20], [0, 1], [0, 2]])}, {"id": torch.LongTensor([[3]])}] def add(number, extra=0): return {"id": number["id"] + extra} dataset = PipelineIterator(dummy_dataset, add, 
{"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual( nested_simplify(outputs), [{"id": [[12, 22]]}, {"id": [[2, 3]]}, {"id": [[2, 4]]}, {"id": [[5]]}] ) @require_torch def test_pipeline_chunk_iterator(self): from transformers.pipelines.pt_utils import PipelineChunkIterator def preprocess_chunk(n: int): for i in range(n): yield i dataset = [2, 3] dataset = PipelineChunkIterator(dataset, preprocess_chunk, {}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [0, 1, 0, 1, 2]) @require_torch def test_pipeline_pack_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator def pack(item): return {"id": item["id"] + 1, "is_last": item["is_last"]} dataset = [ {"id": 0, "is_last": False}, {"id": 1, "is_last": True}, {"id": 0, "is_last": False}, {"id": 1, "is_last": False}, {"id": 2, "is_last": True}, ] dataset = PipelinePackIterator(dataset, pack, {}) outputs = list(dataset) self.assertEqual( outputs, [ [ {"id": 1}, {"id": 2}, ], [ {"id": 1}, {"id": 2}, {"id": 3}, ], ], ) @require_torch def test_pipeline_pack_unbatch_iterator(self): from transformers.pipelines.pt_utils import PipelinePackIterator dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, True, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}], [{"id": 4}, {"id": 5}]]) # is_false Across batch dummy_dataset = [{"id": [0, 1, 2], "is_last": [False, False, False]}, {"id": [3], "is_last": [True]}] def add(number, extra=0): return {"id": [i + extra for i in number["id"]], "is_last": number["is_last"]} dataset = PipelinePackIterator(dummy_dataset, add, {"extra": 2}, loader_batch_size=3) outputs = list(dataset) self.assertEqual(outputs, [[{"id": 2}, {"id": 3}, {"id": 4}, {"id": 5}]]) def test_pipeline_negative_device(self): # To avoid regressing, pipeline used to accept device=-1 classifier = pipeline("text-generation", "hf-internal-testing/tiny-random-bert", device=-1) expected_output = [{"generated_text": ANY(str)}] actual_output = classifier("Test input.") self.assertEqual(expected_output, actual_output) @slow @require_torch def test_load_default_pipelines_pt(self): import torch from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "pt", set_seed_fn, self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_tf def test_load_default_pipelines_tf(self): from transformers.modeling_tf_utils import keras from transformers.pipelines import SUPPORTED_TASKS set_seed_fn = lambda: keras.utils.set_random_seed(0) # noqa: E731 for task in SUPPORTED_TASKS.keys(): if task == "table-question-answering": # test table in seperate test due to more dependencies continue self.check_default_pipeline(task, "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by TF gc.collect() @slow @require_torch def test_load_default_pipelines_pt_table_qa(self): import torch set_seed_fn = lambda: torch.manual_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "pt", set_seed_fn, 
self.check_models_equal_pt) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() backend_empty_cache(torch_device) @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_torch @require_torch_accelerator def test_pipeline_accelerator_indexed(self): pipe = pipeline("text-generation", device=torch_device) _ = pipe("Hello") @slow @require_tf @require_tensorflow_probability def test_load_default_pipelines_tf_table_qa(self): import tensorflow as tf set_seed_fn = lambda: tf.random.set_seed(0) # noqa: E731 self.check_default_pipeline("table-question-answering", "tf", set_seed_fn, self.check_models_equal_tf) # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() def check_default_pipeline(self, task, framework, set_seed_fn, check_models_equal_fn): from transformers.pipelines import SUPPORTED_TASKS, pipeline task_dict = SUPPORTED_TASKS[task] # test to compare pipeline to manually loading the respective model model = None relevant_auto_classes = task_dict[framework] if len(relevant_auto_classes) == 0: # task has no default logger.debug(f"{task} in {framework} has no default") return # by default use first class auto_model_cls = relevant_auto_classes[0] # retrieve correct model ids if task == "translation": # special case for translation pipeline which has multiple languages model_ids = [] revisions = [] tasks = [] for translation_pair in task_dict["default"].keys(): model_id, revision = task_dict["default"][translation_pair]["model"][framework] model_ids.append(model_id) revisions.append(revision) tasks.append(task + f"_{'_to_'.join(translation_pair)}") else: # normal case - non-translation pipeline model_id, revision = task_dict["default"]["model"][framework] model_ids = [model_id] revisions = [revision] tasks = [task] # check for equality for model_id, revision, task in zip(model_ids, revisions, tasks): # load default model try: set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) except ValueError: # first auto class is possible not compatible with model, go to next model class auto_model_cls = relevant_auto_classes[1] set_seed_fn() model = auto_model_cls.from_pretrained(model_id, revision=revision) # load default pipeline set_seed_fn() default_pipeline = pipeline(task, framework=framework) # compare pipeline model with default model models_are_equal = check_models_equal_fn(default_pipeline.model, model) self.assertTrue(models_are_equal, f"{task} model doesn't match pipeline.") logger.debug(f"{task} in {framework} succeeded with {model_id}.") def check_models_equal_pt(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.parameters(), model2.parameters()): if model1_p.data.ne(model2_p.data).sum() > 0: models_are_equal = False return models_are_equal def check_models_equal_tf(self, model1, model2): models_are_equal = True for model1_p, model2_p in zip(model1.weights, model2.weights): if np.abs(model1_p.numpy() - model2_p.numpy()).sum() > 1e-5: models_are_equal = False return models_are_equal class CustomPipeline(Pipeline): def _sanitize_parameters(self, **kwargs): preprocess_kwargs = {} if "maybe_arg" in kwargs: preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"] return preprocess_kwargs, {}, {} def preprocess(self, text, maybe_arg=2): input_ids = self.tokenizer(text, return_tensors="pt") return input_ids def _forward(self, model_inputs): outputs = self.model(**model_inputs) return 
outputs def postprocess(self, model_outputs): return model_outputs["logits"].softmax(-1).numpy() @is_pipeline_test class CustomPipelineTest(unittest.TestCase): def test_warning_logs(self): transformers_logging.set_verbosity_debug() logger_ = transformers_logging.get_logger("transformers.pipelines.base") alias = "text-classification" # Get the original task, so we can restore it at the end. # (otherwise the subsequential tests in `TextClassificationPipelineTests` will fail) _, original_task, _ = PIPELINE_REGISTRY.check_task(alias) try: with CaptureLogger(logger_) as cm: PIPELINE_REGISTRY.register_pipeline(alias, PairClassificationPipeline) self.assertIn(f"{alias} is already registered", cm.out) finally: # restore PIPELINE_REGISTRY.supported_tasks[alias] = original_task def test_register_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "custom-text-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, default={"pt": "hf-internal-testing/tiny-random-distilbert"}, type="text", ) assert "custom-text-classification" in PIPELINE_REGISTRY.get_supported_tasks() _, task_def, _ = PIPELINE_REGISTRY.check_task("custom-text-classification") self.assertEqual(task_def["pt"], (AutoModelForSequenceClassification,) if is_torch_available() else ()) self.assertEqual(task_def["tf"], (TFAutoModelForSequenceClassification,) if is_tf_available() else ()) self.assertEqual(task_def["type"], "text") self.assertEqual(task_def["impl"], PairClassificationPipeline) self.assertEqual(task_def["default"], {"model": {"pt": "hf-internal-testing/tiny-random-distilbert"}}) # Clean registry for next tests. del PIPELINE_REGISTRY.supported_tasks["custom-text-classification"] @require_torch_or_tf def test_dynamic_pipeline(self): PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification if is_torch_available() else None, tf_model=TFAutoModelForSequenceClassification if is_tf_available() else None, ) classifier = pipeline("pair-classification", model="hf-internal-testing/tiny-random-bert") # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] with tempfile.TemporaryDirectory() as tmp_dir: classifier.save_pretrained(tmp_dir) # checks self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",) if is_torch_available() else (), "tf": ("TFAutoModelForSequenceClassification",) if is_tf_available() else (), } }, ) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=tmp_dir) new_classifier = pipeline(model=tmp_dir, trust_remote_code=True) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline("text-classification", model=tmp_dir, trust_remote_code=False) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") self.assertEqual(new_classifier.task, "pair-classification") results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual( nested_simplify(results), {"label": "LABEL_0", "score": 0.505, "logits": [-0.003, -0.024]}, ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify(results), [{"label": "LABEL_0", "score": 0.505}], ) @require_torch_or_tf def test_cached_pipeline_has_minimum_calls_to_head(self): # Make sure we have cached the pipeline. _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") with RequestCounter() as counter: _ = pipeline("text-classification", model="hf-internal-testing/tiny-random-bert") self.assertEqual(counter["GET"], 0) self.assertEqual(counter["HEAD"], 1) self.assertEqual(counter.total_calls, 1) @require_torch def test_chunk_pipeline_batching_single_file(self): # Make sure we have cached the pipeline. 
pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation").sort("id") audio = ds[40]["audio"]["array"] pipe = pipeline(model="hf-internal-testing/tiny-random-Wav2Vec2ForCTC") # For some reason scoping doesn't work if not using `self.` self.COUNT = 0 forward = pipe.model.forward def new_forward(*args, **kwargs): self.COUNT += 1 return forward(*args, **kwargs) pipe.model.forward = new_forward for out in pipe(audio, return_timestamps="char", chunk_length_s=3, stride_length_s=[1, 1], batch_size=1024): pass self.assertEqual(self.COUNT, 1) @require_torch @is_staging_test class DynamicPipelineTester(unittest.TestCase): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "I", "love", "hate", "you"] @classmethod def setUpClass(cls): cls._token = TOKEN HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-dynamic-pipeline") except HTTPError: pass def test_push_to_hub_dynamic_pipeline(self): from transformers import BertConfig, BertForSequenceClassification, BertTokenizer PIPELINE_REGISTRY.register_pipeline( "pair-classification", pipeline_class=PairClassificationPipeline, pt_model=AutoModelForSequenceClassification, ) config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = BertForSequenceClassification(config).eval() with tempfile.TemporaryDirectory() as tmp_dir: vocab_file = os.path.join(tmp_dir, "vocab.txt") with open(vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens])) tokenizer = BertTokenizer(vocab_file) classifier = pipeline("pair-classification", model=model, tokenizer=tokenizer) # Clean registry as we won't need the pipeline to be in it for the rest to work. 
del PIPELINE_REGISTRY.supported_tasks["pair-classification"] classifier.save_pretrained(tmp_dir) # checks if the configuration has been added after calling the save_pretrained method self.assertDictEqual( classifier.model.config.custom_pipelines, { "pair-classification": { "impl": "custom_pipeline.PairClassificationPipeline", "pt": ("AutoModelForSequenceClassification",), "tf": (), } }, ) # use push_to_hub method to push the pipeline classifier.push_to_hub(f"{USER}/test-dynamic-pipeline", token=self._token) # Fails if the user forget to pass along `trust_remote_code=True` with self.assertRaises(ValueError): _ = pipeline(model=f"{USER}/test-dynamic-pipeline") new_classifier = pipeline(model=f"{USER}/test-dynamic-pipeline", trust_remote_code=True) # Can't make an isinstance check because the new_classifier is from the PairClassificationPipeline class of a # dynamic module self.assertEqual(new_classifier.__class__.__name__, "PairClassificationPipeline") results = classifier("I hate you", second_text="I love you") new_results = new_classifier("I hate you", second_text="I love you") self.assertDictEqual(nested_simplify(results), nested_simplify(new_results)) # Using trust_remote_code=False forces the traditional pipeline tag old_classifier = pipeline( "text-classification", model=f"{USER}/test-dynamic-pipeline", trust_remote_code=False ) self.assertEqual(old_classifier.__class__.__name__, "TextClassificationPipeline") self.assertEqual(old_classifier.task, "text-classification") new_results = old_classifier("I hate you", text_pair="I love you") self.assertListEqual( nested_simplify([{"label": results["label"], "score": results["score"]}]), nested_simplify(new_results) )
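# A minimal sketch of a pair-classification pipeline in the spirit of the
# `custom_pipeline.PairClassificationPipeline` these dynamic-pipeline tests register and then
# reload with `trust_remote_code=True`; the actual module may differ. It assumes a PyTorch
# sequence-classification checkpoint and follows the preprocess/_forward/postprocess contract.
import numpy as np

from transformers import AutoModelForSequenceClassification, Pipeline
from transformers.pipelines import PIPELINE_REGISTRY


class PairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Encode the two texts as a single sequence pair.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # Softmax over the logits of the single example in the batch.
        logits = model_outputs.logits[0].numpy()
        probabilities = np.exp(logits - logits.max())
        probabilities /= probabilities.sum()
        best_class = int(probabilities.argmax())
        return {
            "label": self.model.config.id2label[best_class],
            "score": float(probabilities[best_class]),
            "logits": logits.tolist(),
        }


# Registering the task is what makes `pipeline("pair-classification", ...)` resolve to the
# class above, mirroring what the tests do before saving or pushing the pipeline.
PIPELINE_REGISTRY.register_pipeline(
    "pair-classification",
    pipeline_class=PairClassificationPipeline,
    pt_model=AutoModelForSequenceClassification,
)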
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_depth_estimation.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from huggingface_hub.utils import insecure_hashlib from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class DepthEstimationPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, depth_estimator, examples): outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png") self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs) import datasets # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") outputs = depth_estimator( [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["image"], # LA dataset[1]["image"], # L dataset[2]["image"], ] ) self.assertEqual( [ {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, ], outputs, ) @require_tf @unittest.skip("Depth estimation is not implemented in TF") def test_small_model_tf(self): pass @slow @require_torch def test_large_model_pt(self): model_id = "Intel/dpt-large" depth_estimator = pipeline("depth-estimation", model=model_id) outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg") outputs["depth"] = hashimage(outputs["depth"]) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304) self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662) @require_torch def test_small_model_pt(self): # This is highly irregular to have no small tests. 
self.skipTest("There is no hf-internal-testing tiny model for either GLPN or DPT")
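# A minimal usage sketch for the depth-estimation pipeline exercised above, assuming torch,
# PIL and network access; "Intel/dpt-large" is the checkpoint the slow test loads.
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")

# As asserted in run_pipeline_test, each result carries the raw depth tensor plus a PIL image
# rescaled for visualization.
print(result["predicted_depth"].shape)
result["depth"].save("depth_visualization.png")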
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_table_question_answering.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import ( MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, AutoModelForTableQuestionAnswering, AutoTokenizer, TableQuestionAnsweringPipeline, TFAutoModelForTableQuestionAnswering, is_torch_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, require_pandas, require_tensorflow_probability, require_tf, require_torch, slow, ) if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_12 else: is_torch_greater_or_equal_than_1_12 = False @is_pipeline_test class TQAPipelineTests(unittest.TestCase): # Putting it there for consistency, but TQA do not have fast tokenizer # which are needed to generate automatic tests model_mapping = MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING @require_tensorflow_probability @require_pandas @require_tf @require_torch def test_small_model_tf(self): model_id = "lysandre/tiny-tapas-random-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, 
{"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_small_model_pt(self): model_id = "lysandre/tiny-tapas-random-wtq" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) self.assertIsInstance(model.config.aggregation_labels, dict) self.assertIsInstance(model.config.no_aggregation_label_index, int) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the 
total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, {"answer": "AVERAGE > ", "coordinates": [], "cells": [], "aggregator": "AVERAGE"}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @require_torch def test_slow_tokenizer_sqa_pt(self): model_id = "lysandre/tiny-tapas-random-sqa" model = AutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", 
"how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @require_tf @require_tensorflow_probability @require_pandas @require_torch def test_slow_tokenizer_sqa_tf(self): model_id = "lysandre/tiny-tapas-random-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id, from_pt=True) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) inputs = { "table": { "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, "query": ["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], } sequential_outputs = table_querier(**inputs, sequential=True) batch_outputs = table_querier(**inputs, sequential=False) self.assertEqual(len(sequential_outputs), 3) self.assertEqual(len(batch_outputs), 3) self.assertEqual(sequential_outputs[0], batch_outputs[0]) self.assertNotEqual(sequential_outputs[1], 
batch_outputs[1]) # self.assertNotEqual(sequential_outputs[2], batch_outputs[2]) table_querier = TableQuestionAnsweringPipeline(model=model, tokenizer=tokenizer) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query="how many movies has george clooney played in?", ) self.assertEqual( outputs, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ) outputs = table_querier( table={ "actors": ["brad pitt", "leonardo di caprio", "george clooney"], "age": ["56", "45", "59"], "number of movies": ["87", "53", "69"], "date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], }, query=["how many movies has george clooney played in?", "how old is he?", "what's his date of birth?"], ) self.assertEqual( outputs, [ {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, {"answer": "7 february 1967", "coordinates": [(0, 3)], "cells": ["7 february 1967"]}, ], ) outputs = table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, query=[ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most" " active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ], ) self.assertEqual( outputs, [ {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, {"answer": "Python, Python", "coordinates": [(0, 3), (1, 3)], "cells": ["Python", "Python"]}, ], ) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table=None) with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table="") with self.assertRaises(ValueError): table_querier(query="What does it do with empty context ?", table={}) with self.assertRaises(ValueError): table_querier( table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } ) with self.assertRaises(ValueError): table_querier( query="", table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) with self.assertRaises(ValueError): table_querier( query=None, table={ "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], }, ) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def 
test_integration_wtq_pt(self): table_querier = pipeline("table-question-answering") data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_wtq_tf(self): model_id = "google/tapas-base-finetuned-wtq" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline("table-question-answering", model=model, tokenizer=tokenizer) data = { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], } queries = [ "What repository has the largest number of stars?", "Given that the numbers of stars defines if a repository is active, what repository is the most active?", "What is the number of repositories?", "What is the average number of stars?", "What is the total amount of stars?", ] results = table_querier(data, queries) expected_results = [ {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, {"answer": "Transformers", "coordinates": [(0, 0)], "cells": ["Transformers"], "aggregator": "NONE"}, { "answer": "COUNT > Transformers, Datasets, Tokenizers", "coordinates": [(0, 0), (1, 0), (2, 0)], "cells": ["Transformers", "Datasets", "Tokenizers"], "aggregator": "COUNT", }, { "answer": "AVERAGE > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "AVERAGE", }, { "answer": "SUM > 36542, 4512, 3934", "coordinates": [(0, 1), (1, 1), (2, 1)], "cells": ["36542", "4512", "3934"], "aggregator": "SUM", }, ] self.assertListEqual(results, expected_results) @unittest.skipIf(not is_torch_greater_or_equal_than_1_12, reason="Tapas is only available in torch v1.12+") @slow @require_torch def test_integration_sqa_pt(self): table_querier = pipeline( "table-question-answering", model="google/tapas-base-finetuned-sqa", tokenizer="google/tapas-base-finetuned-sqa", ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has 
George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_tensorflow_probability @require_pandas def test_integration_sqa_tf(self): model_id = "google/tapas-base-finetuned-sqa" model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_id) tokenizer = AutoTokenizer.from_pretrained(model_id) table_querier = pipeline( "table-question-answering", model=model, tokenizer=tokenizer, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = ["How many movies has George Clooney played in?", "How old is he?", "What's his date of birth?"] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": "69", "coordinates": [(2, 2)], "cells": ["69"]}, {"answer": "59", "coordinates": [(2, 1)], "cells": ["59"]}, {"answer": "28 november 1967", "coordinates": [(2, 3)], "cells": ["28 november 1967"]}, ] self.assertListEqual(results, expected_results) @slow @require_torch def test_large_model_pt_tapex(self): model_id = "microsoft/tapex-large-finetuned-wtq" table_querier = pipeline( "table-question-answering", model=model_id, ) data = { "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Age": ["56", "45", "59"], "Number of movies": ["87", "53", "69"], "Date of birth": ["7 february 1967", "10 june 1996", "28 november 1967"], } queries = [ "How many movies has George Clooney played in?", "How old is Mr Clooney ?", "What's the date of birth of Leonardo ?", ] results = table_querier(data, queries, sequential=True) expected_results = [ {"answer": " 69"}, {"answer": " 59"}, {"answer": " 10 june 1996"}, ] self.assertListEqual(results, expected_results)
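# A minimal usage sketch for the table-question-answering pipeline covered above, assuming
# torch and pandas are installed; "google/tapas-base-finetuned-wtq" is the WTQ checkpoint the
# TF integration test loads explicitly.
from transformers import pipeline

table_querier = pipeline("table-question-answering", model="google/tapas-base-finetuned-wtq")
table = {
    "Repository": ["Transformers", "Datasets", "Tokenizers"],
    "Stars": ["36542", "4512", "3934"],
}
result = table_querier(table=table, query="How many stars does the Transformers repository have?")
# TAPAS answers with the selected cells, their coordinates and the predicted aggregation
# operator, e.g. something like
# {"answer": "36542", "coordinates": [(0, 1)], "cells": ["36542"], "aggregator": "NONE"}.
print(result)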
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_feature_extraction.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import ( FEATURE_EXTRACTOR_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_MAPPING, TF_MODEL_MAPPING, FeatureExtractionPipeline, LxmertConfig, is_tf_available, is_torch_available, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf @is_pipeline_test class FeatureExtractionPipelineTests(unittest.TestCase): model_mapping = MODEL_MAPPING tf_model_mapping = TF_MODEL_MAPPING @require_torch def test_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, 
-0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_tf def test_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 
0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip @require_torch def test_tokenization_small_model_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, 
-1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_tf def test_tokenization_small_model_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) # test with empty parameters outputs = feature_extractor("This is a test") self.assertEqual( nested_simplify(outputs), [[[2.287, 1.234, 0.042, 1.53, 1.306, 0.879, -0.526, -1.71, -1.276, 0.756, -0.775, -1.048, -0.25, -0.595, -0.137, -0.598, 2.022, -0.812, 0.284, -0.488, -0.391, -0.403, -0.525, -0.061, -0.228, 1.086, 0.378, -0.14, 0.599, -0.087, -2.259, -0.098], [1.676, 0.232, -1.508, -0.145, 1.798, -1.388, 1.331, -0.37, -0.939, 0.043, 0.06, -0.414, -1.408, 0.24, 0.622, -0.55, -0.569, 1.873, -0.706, 1.924, -0.254, 1.927, -0.423, 0.152, -0.952, 0.509, -0.496, -0.968, 0.093, -1.049, -0.65, 0.312], [0.207, -0.775, -1.822, 0.321, -0.71, -0.201, 0.3, 1.146, -0.233, -0.753, -0.305, 1.309, -1.47, -0.21, 1.802, -1.555, -1.175, 1.323, -0.303, 0.722, -0.076, 0.103, -1.406, 1.931, 0.091, 0.237, 1.172, 1.607, 0.253, -0.9, -1.068, 0.438], [0.615, 1.077, 0.171, -0.175, 1.3, 0.901, -0.653, -0.138, 0.341, -0.654, -0.184, -0.441, -0.424, 0.356, -0.075, 0.26, -1.023, 0.814, 0.524, -0.904, -0.204, -0.623, 1.234, -1.03, 2.594, 0.56, 1.831, -0.199, -1.508, -0.492, -1.687, -2.165], [0.129, 0.008, -1.279, -0.412, -0.004, 1.663, 0.196, 0.104, 0.123, 0.119, 0.635, 1.757, 2.334, -0.799, -1.626, -1.26, 0.595, -0.316, -1.399, 0.232, 0.264, 1.386, -1.171, -0.256, -0.256, -1.944, 1.168, -0.368, -0.714, -0.51, 0.454, 1.148], [-0.32, 0.29, -1.309, -0.177, 0.453, 0.636, -0.024, 0.509, 0.931, -1.754, -1.575, 0.786, 0.046, -1.165, -1.416, 1.373, 1.293, -0.285, -1.541, -1.186, -0.106, -0.994, 2.001, 0.972, -0.02, 1.654, -0.236, 0.643, 1.02, 0.572, -0.914, -0.154], [0.7, -0.937, 0.441, 0.25, 
0.78, -0.022, 0.282, -0.095, 1.558, -0.336, 1.706, 0.884, 1.28, 0.198, -0.796, 1.218, -1.769, 1.197, -0.342, -0.177, -0.645, 1.364, 0.008, -0.597, -0.484, -2.772, -0.696, -0.632, -0.34, -1.527, -0.562, 0.862], [2.504, 0.831, -1.271, -0.033, 0.298, -0.735, 1.339, 1.74, 0.233, -1.424, -0.819, -0.761, 0.291, 0.853, -0.092, -0.885, 0.164, 1.025, 0.907, 0.749, -1.515, -0.545, -1.365, 0.271, 0.034, -2.005, 0.031, 0.244, 0.621, 0.176, 0.336, -1.196], [-0.711, 0.591, -1.001, -0.946, 0.784, -1.66, 1.545, 0.799, -0.857, 1.148, 0.213, -0.285, 0.464, -0.139, 0.79, -1.663, -1.121, 0.575, -0.178, -0.508, 1.565, -0.242, -0.346, 1.024, -1.135, -0.158, -2.101, 0.275, 2.009, -0.425, 0.716, 0.981], [0.912, -1.186, -0.846, -0.421, -1.315, -0.827, 0.309, 0.533, 1.029, -2.343, 1.513, -1.238, 1.487, -0.849, 0.896, -0.927, -0.459, 0.159, 0.177, 0.873, 0.935, 1.433, -0.485, 0.737, 1.327, -0.338, 1.608, -0.47, -0.445, -1.118, -0.213, -0.446], [-0.434, -1.362, -1.098, -1.068, 1.507, 0.003, 0.413, -0.395, 0.897, -0.237, 1.405, -0.344, 1.693, 0.677, 0.097, -0.257, -0.602, 1.026, -1.229, 0.855, -0.713, 1.014, 0.443, 0.238, 0.425, -2.184, 1.933, -1.157, -1.132, -0.597, -0.785, 0.967], [0.58, -0.971, 0.789, -0.468, -0.576, 1.779, 1.747, 1.715, -1.939, 0.125, 0.656, -0.042, -1.024, -1.767, 0.107, -0.408, -0.866, -1.774, 1.248, 0.939, -0.033, 1.523, 1.168, -0.744, 0.209, -0.168, -0.316, 0.207, -0.432, 0.047, -0.646, -0.664], [-0.185, -0.613, -1.695, 1.602, -0.32, -0.277, 0.967, 0.728, -0.965, -0.234, 1.069, -0.63, -1.631, 0.711, 0.426, 1.298, -0.191, -0.467, -0.771, 0.971, -0.118, -1.577, -2.064, -0.055, -0.59, 0.642, -0.997, 1.251, 0.538, 1.367, 0.106, 1.704]]]) # fmt: skip # test with various tokenizer parameters tokenize_kwargs = {"max_length": 3} outputs = feature_extractor("This is a test", tokenize_kwargs=tokenize_kwargs) self.assertEqual(np.squeeze(outputs).shape, (3, 32)) tokenize_kwargs = {"truncation": True, "padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) tokenize_kwargs = {"padding": True, "max_length": 4} outputs = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) self.assertEqual(np.squeeze(outputs).shape, (5, 4, 32)) # raise value error if truncation parameter given for two places tokenize_kwargs = {"truncation": True} with self.assertRaises(ValueError): _ = feature_extractor( ["This is a test", "This", "This is", "This is a", "This is a test test test test"], truncation=True, tokenize_kwargs=tokenize_kwargs, ) @require_torch def test_return_tensors_pt(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(torch.is_tensor(outputs)) @require_tf def test_return_tensors_tf(self): feature_extractor = pipeline( task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="tf" ) outputs = feature_extractor("This is a test", return_tensors=True) self.assertTrue(tf.is_tensor(outputs)) def get_shape(self, input_, shape=None): if shape is None: shape = [] if isinstance(input_, list): subshapes = [self.get_shape(in_, shape) for in_ in input_] if all(s == 0 for s in subshapes): shape.append(len(input_)) else: subshape = subshapes[0] shape = 
[len(input_), *subshape] elif isinstance(input_, float): return 0 else: raise ValueError("We expect lists of floats, nothing else") return shape def get_test_pipeline(self, model, tokenizer, processor): if tokenizer is None: self.skipTest("No tokenizer") return elif ( type(model.config) in FEATURE_EXTRACTOR_MAPPING or isinstance(model.config, LxmertConfig) or type(model.config) in IMAGE_PROCESSOR_MAPPING ): self.skipTest("This is a bimodal model, we need to find a more consistent way to switch on those models.") return elif model.config.is_encoder_decoder: self.skipTest( """encoder_decoder models are trickier for this pipeline. Do we want encoder + decoder inputs to get some featues? Do we want encoder only features ? For now ignore those. """ ) return feature_extractor = FeatureExtractionPipeline(model=model, tokenizer=tokenizer, feature_extractor=processor) return feature_extractor, ["This is a test", "This is another test"] def run_pipeline_test(self, feature_extractor, examples): outputs = feature_extractor("This is a test") shape = self.get_shape(outputs) self.assertEqual(shape[0], 1) # If we send too small input # there's a bug within FunnelModel (output with shape [1, 4, 2, 1] doesn't match the broadcast shape [1, 4, 2, 2]) outputs = feature_extractor(["This is a test", "Another longer test"]) shape = self.get_shape(outputs) self.assertEqual(shape[0], 2) outputs = feature_extractor("This is a test" * 100, truncation=True) shape = self.get_shape(outputs) self.assertEqual(shape[0], 1)
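# A minimal usage sketch for the feature-extraction pipeline tested above, assuming torch;
# the tiny random checkpoint is the same one the unit tests use (hidden size 32), so the
# values are meaningless but the shapes are representative.
import numpy as np

from transformers import pipeline

feature_extractor = pipeline(
    task="feature-extraction", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
)
# The output is a nested list shaped [batch, num_tokens, hidden_size]; mean-pooling over the
# token axis gives one fixed-size vector per input string.
features = np.array(feature_extractor("This is a test"))
sentence_embedding = features.mean(axis=1)
print(features.shape, sentence_embedding.shape)  # (1, num_tokens, 32) and (1, 32)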
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_image_segmentation.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest from typing import Dict import datasets import numpy as np import requests from datasets import load_dataset from huggingface_hub.utils import insecure_hashlib from transformers import ( MODEL_FOR_IMAGE_SEGMENTATION_MAPPING, MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING, MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING, AutoImageProcessor, AutoModelForImageSegmentation, AutoModelForInstanceSegmentation, DetrForSegmentation, ImageSegmentationPipeline, MaskFormerForInstanceSegmentation, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass def hashimage(image: Image) -> str: m = insecure_hashlib.md5(image.tobytes()) return m.hexdigest()[:10] def mask_to_test_readable(mask: Image) -> Dict: npimg = np.array(mask) white_pixels = (npimg == 255).sum() shape = npimg.shape return {"hash": hashimage(mask), "white_pixels": white_pixels, "shape": shape} def mask_to_test_readable_only_shape(mask: Image) -> Dict: npimg = np.array(mask) shape = npimg.shape return {"shape": shape} @is_pipeline_test @require_vision @require_timm @require_torch class ImageSegmentationPipelineTests(unittest.TestCase): model_mapping = dict( (list(MODEL_FOR_IMAGE_SEGMENTATION_MAPPING.items()) if MODEL_FOR_IMAGE_SEGMENTATION_MAPPING else []) + (MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING.items() if MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING else []) + (MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING.items() if MODEL_FOR_INSTANCE_SEGMENTATION_MAPPING else []) ) def get_test_pipeline(self, model, tokenizer, processor): image_segmenter = ImageSegmentationPipeline(model=model, image_processor=processor) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def run_pipeline_test(self, image_segmenter, examples): outputs = image_segmenter( "./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, ) self.assertIsInstance(outputs, list) n = len(outputs) if isinstance(image_segmenter.model, (MaskFormerForInstanceSegmentation, DetrForSegmentation)): # Instance segmentation (maskformer, and detr) have a slot for null class # and can output nothing even with a low threshold self.assertGreaterEqual(n, 0) else: self.assertGreaterEqual(n, 1) # XXX: PIL.Image implements __eq__ which bypasses ANY, so we inverse the comparison # to make it work self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, outputs) # we use revision="refs/pr/1" until the PR is merged # https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1 dataset = 
datasets.load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1") # RGBA outputs = image_segmenter(dataset[0]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # LA outputs = image_segmenter(dataset[1]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) # L outputs = image_segmenter(dataset[2]["image"], threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0) m = len(outputs) self.assertEqual([{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * m, outputs) if isinstance(image_segmenter.model, DetrForSegmentation): # We need to test batch_size with images with the same size. # Detr doesn't normalize the size of the images, meaning we can have # 800x800 or 800x1200, meaning we cannot batch simply. # We simply bail on this batch_size = 1 else: batch_size = 2 # 5 times the same image so the output shape is predictable batch = [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] outputs = image_segmenter( batch, threshold=0.0, mask_threshold=0, overlap_mask_area_threshold=0, batch_size=batch_size, ) self.assertEqual(len(batch), len(outputs)) self.assertEqual(len(outputs[0]), n) self.assertEqual( [ [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, [{"score": ANY(float, type(None)), "label": ANY(str), "mask": ANY(Image.Image)}] * n, ], outputs, f"Expected [{n}, {n}, {n}, {n}, {n}], got {[len(item) for item in outputs]}", ) @require_tf @unittest.skip("Image segmentation not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt_no_panoptic(self): model_id = "hf-internal-testing/tiny-random-mobilevit" # The default task is `image-classification` we need to override pipe = pipeline(task="image-segmentation", model=model_id) # This model does NOT support neither `instance` nor `panoptic` # We should error out with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="panoptic") self.assertEqual( str(e.exception), "Subtask panoptic is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) with self.assertRaises(ValueError) as e: pipe("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") self.assertEqual( str(e.exception), "Subtask instance is not supported for model <class" " 'transformers.models.mobilevit.modeling_mobilevit.MobileViTForSemanticSegmentation'>", ) @require_torch def test_small_model_pt(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = 
ImageSegmentationPipeline( model=model, image_processor=image_processor, subtask="panoptic", threshold=0.0, mask_threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) # This is extremely brittle, and those values are made specific for the CI. self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ], ) output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="instance") for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(output, decimals=4), [ { "score": 0.004, "label": "LABEL_215", "mask": {"hash": "a01498ca7c", "shape": (480, 640), "white_pixels": 307200}, }, ], ) # This must be surprising to the reader. # The `panoptic` returns only LABEL_215, and this returns 3 labels. # output = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", subtask="semantic") output_masks = [o["mask"] for o in output] # page links (to visualize) expected_masks = [ "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_0.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_1.png", "https://huggingface.co/datasets/hf-internal-testing/mask-for-image-segmentation-tests/blob/main/mask_2.png", ] # actual links to get files expected_masks = [x.replace("/blob/", "/resolve/") for x in expected_masks] expected_masks = [Image.open(requests.get(image, stream=True).raw) for image in expected_masks] # Convert masks to numpy array output_masks = [np.array(x) for x in output_masks] expected_masks = [np.array(x) for x in expected_masks] self.assertEqual(output_masks[0].shape, expected_masks[0].shape) self.assertEqual(output_masks[1].shape, expected_masks[1].shape) self.assertEqual(output_masks[2].shape, expected_masks[2].shape) # With un-trained tiny random models, the output `logits` tensor is very likely to contain many values # close to each other, which cause `argmax` to give quite different results when running the test on 2 # environments. We use a lower threshold `0.9` here to avoid flakiness. 
self.assertGreaterEqual(np.mean(output_masks[0] == expected_masks[0]), 0.9) self.assertGreaterEqual(np.mean(output_masks[1] == expected_masks[1]), 0.9) self.assertGreaterEqual(np.mean(output_masks[2] == expected_masks[2]), 0.9) for o in output: o["mask"] = mask_to_test_readable_only_shape(o["mask"]) self.maxDiff = None self.assertEqual( nested_simplify(output, decimals=4), [ { "label": "LABEL_88", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_101", "mask": {"shape": (480, 640)}, "score": None, }, { "label": "LABEL_215", "mask": {"shape": (480, 640)}, "score": None, }, ], ) @require_torch def test_small_model_pt_semantic(self): model_id = "hf-internal-testing/tiny-random-beit-pipeline" image_segmenter = pipeline(model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg") for o in outputs: # shortening by hashing o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "LABEL_0", "mask": {"hash": "42d0907228", "shape": (480, 640), "white_pixels": 10714}, }, { "score": None, "label": "LABEL_1", "mask": {"hash": "46b8cc3976", "shape": (480, 640), "white_pixels": 296486}, }, ], ) @require_torch @slow def test_integration_torch_image_segmentation(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline( "image-segmentation", model=model_id, threshold=0.0, overlap_mask_area_threshold=0.0, ) outputs = image_segmenter( "http://images.cocodataset.org/val2017/000000039769.jpg", ) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) outputs = image_segmenter( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ], ) # Shortening by hashing for output in outputs: for o in output: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], [ { "score": 0.9094, "label": "blanket", "mask": {"hash": "dcff19a97a", "shape": (480, 640), "white_pixels": 16617}, }, { "score": 0.9941, 
"label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ], ) @require_torch @slow def test_threshold(self): model_id = "facebook/detr-resnet-50-panoptic" image_segmenter = pipeline("image-segmentation", model=model_id) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.999) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9995, "label": "remote", "mask": {"hash": "d02404f578", "shape": (480, 640), "white_pixels": 2789}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "eaa115b40c", "shape": (480, 640), "white_pixels": 304411}, }, ], ) outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.5) for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9941, "label": "cat", "mask": {"hash": "9c0af87bd0", "shape": (480, 640), "white_pixels": 59185}, }, { "score": 0.9987, "label": "remote", "mask": {"hash": "c7870600d6", "shape": (480, 640), "white_pixels": 4182}, }, { "score": 0.9995, "label": "remote", "mask": {"hash": "ef899a25fd", "shape": (480, 640), "white_pixels": 2275}, }, { "score": 0.9722, "label": "couch", "mask": {"hash": "37b8446ac5", "shape": (480, 640), "white_pixels": 172380}, }, { "score": 0.9994, "label": "cat", "mask": {"hash": "6a09d3655e", "shape": (480, 640), "white_pixels": 52561}, }, ], ) @require_torch @slow def test_maskformer(self): threshold = 0.8 model_id = "facebook/maskformer-swin-base-ade" model = AutoModelForInstanceSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline("image-segmentation", model=model, image_processor=image_processor) image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=threshold) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9974, "label": "wall", "mask": {"hash": "a547b7c062", "shape": (512, 683), "white_pixels": 14252}, }, { "score": 0.949, "label": "house", "mask": {"hash": "0da9b7b38f", "shape": (512, 683), "white_pixels": 132177}, }, { "score": 0.9995, "label": "grass", "mask": {"hash": "1d07ea0a26", "shape": (512, 683), "white_pixels": 53444}, }, { "score": 0.9976, "label": "tree", "mask": {"hash": "6cdc97c7da", "shape": (512, 683), "white_pixels": 7944}, }, { "score": 0.8239, "label": "plant", "mask": {"hash": "1ab4ce378f", "shape": (512, 683), "white_pixels": 4136}, }, { "score": 0.9942, "label": "road, route", "mask": {"hash": "39c5d17be5", "shape": (512, 683), "white_pixels": 1941}, }, { "score": 1.0, "label": "sky", "mask": {"hash": "a3756324a6", "shape": (512, 683), "white_pixels": 135802}, }, ], ) @require_torch @slow def test_oneformer(self): image_segmenter = 
pipeline(model="shi-labs/oneformer_ade20k_swin_tiny") image = load_dataset("hf-internal-testing/fixtures_ade20k", split="test") file = image[0]["file"] outputs = image_segmenter(file, threshold=0.99) # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9981, "label": "grass", "mask": {"hash": "3a92904d4c", "white_pixels": 118131, "shape": (512, 683)}, }, { "score": 0.9992, "label": "sky", "mask": {"hash": "fa2300cc9a", "white_pixels": 231565, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, threshold=0.99, subtask="instance") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": 0.9991, "label": "sky", "mask": {"hash": "8b1ffad016", "white_pixels": 230566, "shape": (512, 683)}, }, { "score": 0.9981, "label": "grass", "mask": {"hash": "9bbdf83d3d", "white_pixels": 119130, "shape": (512, 683)}, }, ], ) # Different task outputs = image_segmenter(file, subtask="semantic") # Shortening by hashing for o in outputs: o["mask"] = mask_to_test_readable(o["mask"]) self.assertEqual( nested_simplify(outputs, decimals=4), [ { "score": None, "label": "wall", "mask": {"hash": "897fb20b7f", "white_pixels": 14506, "shape": (512, 683)}, }, { "score": None, "label": "building", "mask": {"hash": "f2a68c63e4", "white_pixels": 125019, "shape": (512, 683)}, }, { "score": None, "label": "sky", "mask": {"hash": "e0ca3a548e", "white_pixels": 135330, "shape": (512, 683)}, }, { "score": None, "label": "tree", "mask": {"hash": "7c9544bcac", "white_pixels": 16263, "shape": (512, 683)}, }, { "score": None, "label": "road, route", "mask": {"hash": "2c7704e491", "white_pixels": 2143, "shape": (512, 683)}, }, { "score": None, "label": "grass", "mask": {"hash": "bf6c2867e0", "white_pixels": 53040, "shape": (512, 683)}, }, { "score": None, "label": "plant", "mask": {"hash": "93c4b7199e", "white_pixels": 3335, "shape": (512, 683)}, }, { "score": None, "label": "house", "mask": {"hash": "93ec419ad5", "white_pixels": 60, "shape": (512, 683)}, }, ], ) def test_save_load(self): model_id = "hf-internal-testing/tiny-detr-mobilenetsv3-panoptic" model = AutoModelForImageSegmentation.from_pretrained(model_id) image_processor = AutoImageProcessor.from_pretrained(model_id) image_segmenter = pipeline( task="image-segmentation", model=model, image_processor=image_processor, ) with tempfile.TemporaryDirectory() as tmpdirname: image_segmenter.save_pretrained(tmpdirname) pipeline(task="image-segmentation", model=tmpdirname)
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_zero_shot_object_detection.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class Image: @staticmethod def open(*args, **kwargs): pass @is_pipeline_test @require_vision @require_torch class ZeroShotObjectDetectionPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING def get_test_pipeline(self, model, tokenizer, processor): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) examples = [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ] return object_detector, examples def run_pipeline_test(self, object_detector, examples): outputs = object_detector(examples[0], threshold=0.0) n = len(outputs) self.assertGreater(n, 0) self.assertEqual( outputs, [ { "score": ANY(float), "label": ANY(str), "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)}, } for i in range(n) ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_small_model_tf(self): pass @require_torch def test_small_model_pt(self): object_detector = pipeline( "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection" ) outputs = object_detector( "./tests/fixtures/tests_samples/COCO/000000039769.png", candidate_labels=["cat", "remote", "couch"], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ], ) outputs = object_detector( [ { "image": "./tests/fixtures/tests_samples/COCO/000000039769.png", "candidate_labels": ["cat", "remote", "couch"], } ], threshold=0.64, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.7235, "label": "cat", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 
0.7218, "label": "remote", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.7184, "label": "couch", "box": {"xmin": 204, "ymin": 167, "xmax": 232, "ymax": 190}}, {"score": 0.6748, "label": "remote", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6656, "label": "cat", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6614, "label": "couch", "box": {"xmin": 571, "ymin": 83, "xmax": 598, "ymax": 103}}, {"score": 0.6456, "label": "remote", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, {"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 274, "xmax": 93, "ymax": 297}}, {"score": 0.6419, "label": "cat", "box": {"xmin": 494, "ymin": 105, "xmax": 521, "ymax": 127}}, ] ], ) @require_torch @slow def test_large_model_pt(self): object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ) outputs = object_detector( [ { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, { "image": "http://images.cocodataset.org/val2017/000000039769.jpg", "candidate_labels": ["cat", "remote", "couch"], }, ], ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 315, "ymax": 472}}, {"score": 0.1474, "label": "remote", "box": {"xmin": 335, "ymin": 74, "xmax": 371, "ymax": 187}}, {"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 642, "ymax": 476}}, ], ], ) @require_tf @unittest.skip("Zero Shot Object Detection not implemented in TF") def test_large_model_tf(self): pass @require_torch @slow def test_threshold(self): threshold = 0.2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], threshold=threshold, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, {"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 
55, "xmax": 315, "ymax": 472}}, ], ) @require_torch @slow def test_top_k(self): top_k = 2 object_detector = pipeline("zero-shot-object-detection") outputs = object_detector( "http://images.cocodataset.org/val2017/000000039769.jpg", candidate_labels=["cat", "remote", "couch"], top_k=top_k, ) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.2868, "label": "cat", "box": {"xmin": 324, "ymin": 20, "xmax": 640, "ymax": 373}}, {"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 177, "ymax": 115}}, ], )
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/pipelines/test_pipelines_document_question_answering.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available from transformers.pipelines import pipeline from transformers.pipelines.document_question_answering import apply_tesseract from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_detectron2, require_pytesseract, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image from transformers.image_utils import load_image else: class Image: @staticmethod def open(*args, **kwargs): pass def load_image(_): return None # This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace, # so we can expect it to be available. INVOICE_URL = ( "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png" ) @is_pipeline_test @require_torch @require_vision class DocumentQuestionAnsweringPipelineTests(unittest.TestCase): model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING @require_pytesseract @require_vision def get_test_pipeline(self, model, tokenizer, processor): dqa_pipeline = pipeline( "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor ) image = INVOICE_URL word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) question = "What is the placebo?" examples = [ { "image": load_image(image), "question": question, }, { "image": image, "question": question, }, { "image": image, "question": question, "word_boxes": word_boxes, }, ] return dqa_pipeline, examples def run_pipeline_test(self, dqa_pipeline, examples): outputs = dqa_pipeline(examples, top_k=2) self.assertEqual( outputs, [ [ {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)}, ] ] * 3, ) @require_torch @require_detectron2 @require_pytesseract def test_small_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2-for-dqa-test" ) image = INVOICE_URL question = "How many cats are there?" expected_output = [ {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39}, {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40}, ] outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), expected_output) # This image does not detect ANY text in it, meaning layoutlmv2 should fail. 
# Empty answer probably image = "./tests/fixtures/tests_samples/COCO/000000039769.png" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(outputs, []) # We can optionnally pass directly the words and bounding boxes image = "./tests/fixtures/tests_samples/COCO/000000039769.png" words = [] boxes = [] outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2) self.assertEqual(outputs, []) # TODO: Enable this once hf-internal-testing/tiny-random-donut is implemented # @require_torch # def test_small_model_pt_donut(self): # dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-donut") # # dqa_pipeline = pipeline("document-question-answering", model="../tiny-random-donut") # image = "https://templates.invoicehome.com/invoice-template-us-neat-750px.png" # question = "How many cats are there?" # # outputs = dqa_pipeline(image=image, question=question, top_k=2) # self.assertEqual( # nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}] # ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16}, ], ] * 2, ) @slow @require_torch @require_detectron2 @require_pytesseract def test_large_model_pt_chunk(self): dqa_pipeline = pipeline( "document-question-answering", model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa", revision="9977165", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23}, {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline({"image": image, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=3), [ [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=3), [ {"score": 0.425, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.082, "answer": "1110212019", "start": 23, "end": 23}, ], ) @slow @require_torch @require_pytesseract @require_vision def test_large_model_pt_layoutlm_chunk(self): tokenizer = AutoTokenizer.from_pretrained( "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True ) dqa_pipeline = pipeline( "document-question-answering", model="impira/layoutlm-document-qa", tokenizer=tokenizer, revision="3dc6de3", max_seq_len=50, ) image = INVOICE_URL question = "What is the invoice number?" 
outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) outputs = dqa_pipeline( [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2 ) self.assertEqual( nested_simplify(outputs, decimals=4), [ [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ] ] * 2, ) word_boxes = list(zip(*apply_tesseract(load_image(image), None, ""))) # This model should also work if `image` is set to None outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2) self.assertEqual( nested_simplify(outputs, decimals=4), [ {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16}, {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16}, ], ) @slow @require_torch def test_large_model_pt_donut(self): dqa_pipeline = pipeline( "document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa", tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"), image_processor="naver-clova-ix/donut-base-finetuned-docvqa", ) image = INVOICE_URL question = "What is the invoice number?" outputs = dqa_pipeline(image=image, question=question, top_k=2) self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}]) @require_tf @unittest.skip("Document question answering not implemented in TF") def test_small_model_tf(self): pass
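The slow tests above all target real checkpoints; as a quick orientation, the `impira/layoutlm-document-qa` case boils down to the call below. This is a simplified sketch (the test additionally pins a revision and a tokenizer with `add_prefix_space=True`); the checkpoint, invoice URL and question are copied from the tests, the printed scores are only indicative, and Tesseract must be installed for the OCR step.

```python
from transformers import pipeline

dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")

invoice_url = (
    "https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)

# pytesseract extracts the words and bounding boxes from the image behind the scenes.
answers = dqa(image=invoice_url, question="What is the invoice number?", top_k=2)
print(answers)  # e.g. [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}, ...]
```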
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/test_multi_node_model_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "FacebookAI/roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 1600, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): # configuration for running training on smdistributed Model Parallel mpi_options = { "enabled": True, "processes_per_host": 8, } smp_options = { "enabled": True, "parameters": { "microbatches": 4, "placement_strategy": "spread", "pipeline": "interleaved", "optimize": "speed", "partitions": 4, "ddp": True, }, } distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options} name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer" # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={ **self.env.hyperparameters, "model_name_or_path": self.model_name_or_path, "max_steps": 500, }, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(1,)]) def test_scripz(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, 
"eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/conftest.py
# we define a fixture function below and it will be "used" by
# referencing its name from tests

import os

import pytest
from attr import dataclass


os.environ["AWS_DEFAULT_REGION"] = "us-east-1"  # defaults region


@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
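The `metric_definitions` regexes defined above are handed to SageMaker, which applies them to the training logs to populate `TrainingJobAnalytics`. As a rough illustration of what the capture group extracts (using Python's `re` purely for demonstration; the real matching happens on the SageMaker side, and the log line here is hypothetical):

```python
import re

log_line = "train_runtime = 612.3"  # hypothetical log line emitted by the training script
match = re.search(r"train_runtime.*=\D*(.*?)$", log_line)
print(match.group(1))  # "612.3" -> recorded as the `train_runtime` metric
```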
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/test_single_node_gpu.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class SingleNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count=1): # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=f"{self.env.base_job_name}-single", instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") def test_glue(self): # create estimator estimator = self.create_estimator() # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
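Each test ends by dumping its KPIs to `<job_name>.json` so they can be shared in the release PR. A hypothetical helper for collecting those dumps afterwards (file names and layout assumed from the `json.dump` call above, not part of the repository):

```python
import glob
import json

# Every finished suite leaves one `<job_name>.json` in the working directory.
for path in sorted(glob.glob("*.json")):
    with open(path) as f:
        kpis = json.load(f)
    print(path, kpis["train_time"], kpis["eval_accuracy"], kpis["eval_loss"])
```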
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/__init__.py
import importlib


def is_sagemaker_available():
    return importlib.util.find_spec("sagemaker") is not None
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/test_multi_node_data_parallel.py
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER", "False")) is not True, reason="Skipping test because should only be run when releasing minor transformers version", ) @pytest.mark.usefixtures("sm_env") @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 650, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert/distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 600, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class MultiNodeTest(unittest.TestCase): def setUp(self): if self.framework == "pytorch": subprocess.run( f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(), encoding="utf-8", check=True, ) assert hasattr(self, "env") def create_estimator(self, instance_count): job_name = f"{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}" # distributed data settings distribution = {"smdistributed": {"dataparallel": {"enabled": True}}} if self.script != "run_ddp.py" else None # creates estimator return HuggingFace( entry_point=self.script, source_dir=self.env.test_path, role=self.env.role, image_uri=self.env.image_uri, base_job_name=job_name, instance_count=instance_count, instance_type=self.instance_type, debugger_hook_config=False, hyperparameters={**self.env.distributed_hyperparameters, "model_name_or_path": self.model_name_or_path}, metric_definitions=self.env.metric_definitions, distribution=distribution, py_version="py36", ) def save_results_as_csv(self, job_name): TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv") # @parameterized.expand([(2,), (4,),]) @parameterized.expand([(2,)]) def test_script(self, instance_count): # create estimator estimator = self.create_estimator(instance_count) # run training estimator.fit() # result dataframe result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"]) eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"]) # get train time from SageMaker job, this includes starting, preprocessing, stopping train_runtime = ( Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy) assert all(t <= self.results["eval_loss"] for t in eval_loss) # dump tests result into json file to share in PR with open(f"{estimator.latest_training_job.name}.json", "w") as outfile: json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/sagemaker/README.md
# Testing new Hugging Face Deep Learning Container

This document explains the testing strategy for releasing the new Hugging Face Deep Learning Container. AWS maintains 14 days of currency with framework releases. Besides framework releases, the AWS release train is bi-weekly on Monday. Code cutoff date for any changes is the Wednesday before release-Monday.

## Test Case 1: Releasing a New Version (Minor/Major) of 🤗 Transformers

### Requirements:

Tests should run on the Release Candidate for the new `transformers` release to validate that the new release is compatible with the DLCs. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We adjust the branch to the new RC-tag.

```
git+https://github.com/huggingface/transformers.git@v4.5.0.rc0 # install main or adjust it with vX.X.X to install a specific transformers version
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After Transformers Release:

After we have released the Release Candidate we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**

1. Update the two latest `buildspec.yaml` configs for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow). The two latest `buildspec.yaml` are the `buildspec.yaml` without a version tag and the one with the highest framework version, e.g. `buildspec-1-7-1.yml` and not `buildspec-1-6.yml`. To update the `buildspec.yaml` we need to adjust either the `transformers_version` or the `datasets_version` or both. Example for upgrading to `transformers 4.5.0` and `datasets 1.6.0`:
```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.6.0
short_version: &SHORT_VERSION 1.6

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.5.0 # this was adjusted from 4.4.2 to 4.5.0
    datasets_version: &DATASETS_VERSION 1.6.0 # this was adjusted from 1.5.0 to 1.6.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which framework and package versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1016) to see what information is needed.

## Test Case 2: Releasing a New AWS Framework DLC

### Requirements:

AWS is going to release new DLCs for PyTorch and/or TensorFlow. The tests should run on the new framework versions with the current `transformers` release to validate that the new framework release is compatible with the `transformers` version. To run these tests you need credentials for the HF SageMaker AWS Account. You can ask @philschmid or @n1t0 to get access. AWS will notify us with a new issue in the repository pointing to their framework upgrade PR.

### Run Tests:

Before we can run the tests we need to adjust the `requirements.txt` for PyTorch under `/tests/sagemaker/scripts/pytorch` and for TensorFlow under `/tests/sagemaker/scripts/tensorflow`. We add the new framework version to it.

```
torch==1.8.1 # for pytorch
tensorflow-gpu==2.5.0 # for tensorflow
```

After we adjusted the `requirements.txt` we can run the Amazon SageMaker tests with:

```bash
AWS_PROFILE=<enter-your-profile> make test-sagemaker
```

These tests take around 10-15 minutes to finish. Preferably make a screenshot of the successfully run tests.

### After successful Tests:

After we have successfully run tests for the new framework version we need to create a PR at the [Deep Learning Container Repository](https://github.com/aws/deep-learning-containers).

**Creating the update PR:**
1. Create a new `buildspec.yaml` config for [PyTorch](https://github.com/aws/deep-learning-containers/tree/master/huggingface/pytorch) and [TensorFlow](https://github.com/aws/deep-learning-containers/tree/master/huggingface/tensorflow) and rename the old `buildspec.yaml` to `buildspec-x.x.x`, where `x.x.x` is the base framework version, e.g. if pytorch 1.6.0 is the latest version in `buildspec.yaml` the file should be renamed to `buildspec-1-6.yml`. To create the new `buildspec.yaml` we need to adjust the `version` and the `short_version`. Example for upgrading to `pytorch 1.7.1`:

```yaml
account_id: &ACCOUNT_ID <set-$ACCOUNT_ID-in-environment>
region: &REGION <set-$REGION-in-environment>
base_framework: &BASE_FRAMEWORK pytorch
framework: &FRAMEWORK !join [ "huggingface_", *BASE_FRAMEWORK]
version: &VERSION 1.7.1 # this was adjusted from 1.6.0 to 1.7.1
short_version: &SHORT_VERSION 1.7 # this was adjusted from 1.6 to 1.7

repository_info:
  training_repository: &TRAINING_REPOSITORY
    image_type: &TRAINING_IMAGE_TYPE training
    root: !join [ "huggingface/", *BASE_FRAMEWORK, "/", *TRAINING_IMAGE_TYPE ]
    repository_name: &REPOSITORY_NAME !join ["pr", "-", "huggingface", "-", *BASE_FRAMEWORK, "-", *TRAINING_IMAGE_TYPE]
    repository: &REPOSITORY !join [ *ACCOUNT_ID, .dkr.ecr., *REGION, .amazonaws.com/, *REPOSITORY_NAME ]

images:
  BuildHuggingFacePytorchGpuPy37Cu110TrainingDockerImage:
    <<: *TRAINING_REPOSITORY
    build: &HUGGINGFACE_PYTORCH_GPU_TRAINING_PY3 false
    image_size_baseline: &IMAGE_SIZE_BASELINE 15000
    device_type: &DEVICE_TYPE gpu
    python_version: &DOCKER_PYTHON_VERSION py3
    tag_python_version: &TAG_PYTHON_VERSION py36
    cuda_version: &CUDA_VERSION cu110
    os_version: &OS_VERSION ubuntu18.04
    transformers_version: &TRANSFORMERS_VERSION 4.4.2
    datasets_version: &DATASETS_VERSION 1.5.0
    tag: !join [ *VERSION, '-', 'transformers', *TRANSFORMERS_VERSION, '-', *DEVICE_TYPE, '-', *TAG_PYTHON_VERSION, '-', *CUDA_VERSION, '-', *OS_VERSION ]
    docker_file: !join [ docker/, *SHORT_VERSION, /, *DOCKER_PYTHON_VERSION, /, *CUDA_VERSION, /Dockerfile., *DEVICE_TYPE ]
```

2. In the PR comment describe what tests we ran and with which framework versions. Here you can copy the table from [Current Tests](#current-tests). You can take a look at this [PR](https://github.com/aws/deep-learning-containers/pull/1025) to see what information is needed.
## Current Tests

| ID | Description | Platform | #GPUS | Collected & evaluated metrics |
|-------------------------------------|---------------------------------------------------------------------|-----------------------------|-------|-------------------------------------------|
| pytorch-transfromers-test-single    | test BERT finetuning using the transformers lib + PT                 | SageMaker createTrainingJob | 1     | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transfromers-test-2-ddp     | test BERT finetuning using the transformers lib + PT DDP             | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transfromers-test-2-smd     | test BERT finetuning using the transformers lib + PT SM DDP          | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss  |
| pytorch-transfromers-test-1-smp     | test RoBERTa finetuning using the transformers lib + PT SM MP        | SageMaker createTrainingJob | 8     | train_runtime, eval_accuracy & eval_loss  |
| tensorflow-transfromers-test-single | test BERT finetuning using the transformers lib + TF                 | SageMaker createTrainingJob | 1     | train_runtime, eval_accuracy & eval_loss  |
| tensorflow-transfromers-test-2-smd  | test BERT finetuning using the transformers lib + TF SM DDP          | SageMaker createTrainingJob | 16    | train_runtime, eval_accuracy & eval_loss  |
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on GLUE.""" # You can also adapt this script on your own text classification task. Pointers for this are left as comments. import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import load_dataset, load_metric import transformers from transformers import ( # Trainer,; TrainingArguments, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, PretrainedConfig, default_data_collator, set_seed, ) # Will import SageMaker Model parallelism specific Trainer from transformers.sagemaker import SageMakerTrainer as Trainer from transformers.sagemaker import SageMakerTrainingArguments as TrainingArguments from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.2") task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ task_name: Optional[str] = field( default=None, metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=True, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." 
) }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." ) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.task_name is not None: self.task_name = self.task_name.lower() if self.task_name not in task_to_keys.keys(): raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys())) elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task or a training/validation file.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." 
) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if training_args.should_log else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if training_args.should_log: transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.task_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset("glue", data_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files datasets = load_dataset("csv", data_files=data_files) else: # Loading a dataset from local json files datasets = load_dataset("json", data_files=data_files) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. 
# Labels if data_args.task_name is not None: is_regression = data_args.task_name == "stsb" if not is_regression: label_list = datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique label_list = datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Preprocessing the datasets if data_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[data_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and data_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if sorted(label_name_to_id.keys()) == sorted(label_list): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: logger.warning( "Your model seems to have been trained with labels, but they don't match the dataset: ", f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}." 
"\nIgnoring the model labels as a result.", ) elif data_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_function(examples): # Tokenize the texts args = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) # Map labels to IDs (not necessary for GLUE tasks) if label_to_id is not None and "label" in examples: result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] return result datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets and "validation_matched" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None: if "test" not in datasets and "test_matched" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # Get the metric function if data_args.task_name is not None: metric = load_metric("glue", data_args.task_name) # TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from # compute_metrics # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) if data_args.task_name is not None: result = metric.compute(predictions=preds, references=p.label_ids) if len(result) > 1: result["combined_score"] = np.mean(list(result.values())).item() return result elif is_regression: return {"mse": ((preds - p.label_ids) ** 2).mean().item()} else: return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): # Check the config from that potential checkpoint has the right number of labels before using it as a # checkpoint. if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: checkpoint = model_args.model_name_or_path train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] eval_datasets = [eval_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") eval_datasets.append(datasets["validation_mismatched"]) for eval_dataset, task in zip(eval_datasets, tasks): metrics = trainer.evaluate(eval_dataset=eval_dataset) max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Test ***") # Loop to handle MNLI double evaluation (matched, mis-matched) tasks = [data_args.task_name] test_datasets = [test_dataset] if data_args.task_name == "mnli": tasks.append("mnli-mm") test_datasets.append(datasets["test_mismatched"]) for test_dataset, task in zip(test_datasets, tasks): # Removing the `label` columns because it contains -1 and Trainer won't like that. test_dataset = test_dataset.remove_columns("label") predictions = trainer.predict(test_dataset=test_dataset).predictions predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") if trainer.is_world_process_zero(): with open(output_test_file, "w") as writer: logger.info(f"***** Test results {task} *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): if is_regression: writer.write(f"{index}\t{item:3.3f}\n") else: item = label_list[item] writer.write(f"{index}\t{item}\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
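# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original script above): the HfArgumentParser pattern that
# main() relies on, reduced to a toy dataclass. `ToyDataArgs`, its fields and `parse_toy_args` are
# assumed names for illustration; the real script additionally swaps in the SageMaker-specific
# TrainingArguments subclass.
# ------------------------------------------------------------------------------------------------
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments


@dataclass
class ToyDataArgs:
    task_name: Optional[str] = field(default=None, metadata={"help": "GLUE task name, e.g. mrpc"})
    max_seq_length: int = field(default=128, metadata={"help": "Maximum tokenized sequence length"})


def parse_toy_args(argv):
    # HfArgumentParser turns every dataclass field into a --flag and returns one instance per dataclass.
    parser = HfArgumentParser((ToyDataArgs, TrainingArguments))
    return parser.parse_args_into_dataclasses(argv)


# Example: parse_toy_args(["--task_name", "mrpc", "--max_seq_length", "256", "--output_dir", "/tmp/out"])
# returns (ToyDataArgs(task_name="mrpc", max_seq_length=256), TrainingArguments(output_dir="/tmp/out", ...)).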
0
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/pytorch/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a version-specific transformers
datasets==1.8.0
0
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/pytorch/run_ddp.py
import json import logging import os import subprocess from argparse import ArgumentParser logger = logging.getLogger(__name__) def parse_args(): parser = ArgumentParser() parsed, unknown = parser.parse_known_args() for arg in unknown: if arg.startswith(("-", "--")): parser.add_argument(arg.split("=")[0]) return parser.parse_args() def main(): args = parse_args() port = 8888 num_gpus = int(os.environ["SM_NUM_GPUS"]) hosts = json.loads(os.environ["SM_HOSTS"]) num_nodes = len(hosts) current_host = os.environ["SM_CURRENT_HOST"] rank = hosts.index(current_host) os.environ["NCCL_DEBUG"] = "INFO" if num_nodes > 1: cmd = f"""python -m torch.distributed.launch \ --nnodes={num_nodes} \ --node_rank={rank} \ --nproc_per_node={num_gpus} \ --master_addr={hosts[0]} \ --master_port={port} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" else: cmd = f"""python -m torch.distributed.launch \ --nproc_per_node={num_gpus} \ ./run_glue.py \ {"".join([f" --{parameter} {value}" for parameter,value in args.__dict__.items()])}""" try: subprocess.run(cmd, shell=True) except Exception as e: logger.info(e) if __name__ == "__main__": main()
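# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of run_ddp.py above): how parse_known_args() plus dynamic
# add_argument() lets the wrapper accept arbitrary flags and forward them to run_glue.py.
# `collect_forwarded_args` is an assumed name used only for this example.
# ------------------------------------------------------------------------------------------------
from argparse import ArgumentParser


def collect_forwarded_args(argv):
    parser = ArgumentParser()
    # First pass: every flag is "unknown" because nothing has been registered yet.
    _, unknown = parser.parse_known_args(argv)
    for arg in unknown:
        if arg.startswith(("-", "--")):
            # Register the flag so the second pass binds its value as a string attribute.
            parser.add_argument(arg.split("=")[0])
    return parser.parse_args(argv)


# Example: collect_forwarded_args(["--model_name_or_path", "bert-base-cased", "--epochs", "3"])
# returns Namespace(model_name_or_path="bert-base-cased", epochs="3"), which the wrapper above then
# re-serializes into " --model_name_or_path bert-base-cased --epochs 3" and appends to the
# torch.distributed.launch command for run_glue.py.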
0
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/tensorflow/requirements.txt
git+https://github.com/huggingface/transformers.git@main # install main or adjust it with vX.X.X for installing a version-specific transformers
0
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf.py
import argparse import logging import sys import time import tensorflow as tf from datasets import load_dataset from packaging.version import parse from transformers import AutoTokenizer, TFAutoModelForSequenceClassification try: import tf_keras as keras except (ModuleNotFoundError, ImportError): import keras if parse(keras.__version__).major > 2: raise ValueError( "Your currently installed version of Keras is Keras 3, but this is not yet supported in " "Transformers. Please install the backwards-compatible tf-keras package with " "`pip install tf-keras`." ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. parser.add_argument("--epochs", type=int, default=1) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) args, _ = parser.parse_known_args() # overwrite batch size until we have tf_glue.py args.per_device_train_batch_size = 16 args.per_device_eval_batch_size = 16 # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # Load dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) train_dataset = train_dataset.shuffle().select(range(5000)) # smaller the size for train dataset to 5k test_dataset = test_dataset.shuffle().select(range(500)) # smaller the size for test dataset to 500 # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])).batch( args.per_device_train_batch_size ) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])).batch( args.per_device_eval_batch_size ) # fine optimizer and loss optimizer = keras.optimizers.Adam(learning_rate=args.learning_rate) loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) start_train_time = time.time() train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.per_device_train_batch_size) end_train_time = time.time() - 
start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") for key, value in train_results.history.items(): logger.info(f" {key} = {value}")
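# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of run_tf.py above): the tokenized-features-to-tf.data step in
# isolation, with tiny hard-coded tensors standing in for the IMDB encodings.
# ------------------------------------------------------------------------------------------------
import tensorflow as tf

toy_features = {
    "input_ids": tf.constant([[101, 2023, 102], [101, 2045, 102]]),
    "attention_mask": tf.constant([[1, 1, 1], [1, 1, 1]]),
}
toy_labels = tf.constant([0, 1])

# from_tensor_slices pairs each row of the feature dict with its label; batch() groups the rows.
toy_tf_dataset = tf.data.Dataset.from_tensor_slices((toy_features, toy_labels)).batch(2)
for batch_inputs, batch_labels in toy_tf_dataset:
    print(batch_inputs["input_ids"].shape, batch_labels.shape)  # (2, 3) (2,)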
0
mavonic_private_repos/transformers/tests/sagemaker/scripts
mavonic_private_repos/transformers/tests/sagemaker/scripts/tensorflow/run_tf_dist.py
import argparse import logging import os import sys import time import tensorflow as tf from datasets import load_dataset from tqdm import tqdm from transformers import AutoTokenizer, TFAutoModelForSequenceClassification from transformers.modeling_tf_utils import keras from transformers.utils import is_sagemaker_dp_enabled if os.environ.get("SDP_ENABLED") or is_sagemaker_dp_enabled(): SDP_ENABLED = True os.environ["SAGEMAKER_INSTANCE_TYPE"] = "p3dn.24xlarge" import smdistributed.dataparallel.tensorflow as sdp else: SDP_ENABLED = False def fit(model, loss, opt, train_dataset, epochs, train_batch_size, max_steps=None): pbar = tqdm(train_dataset) for i, batch in enumerate(pbar): with tf.GradientTape() as tape: inputs, targets = batch outputs = model(batch) loss_value = loss(targets, outputs.logits) if SDP_ENABLED: tape = sdp.DistributedGradientTape(tape, sparse_as_dense=True) grads = tape.gradient(loss_value, model.trainable_variables) opt.apply_gradients(zip(grads, model.trainable_variables)) pbar.set_description(f"Loss: {loss_value:.4f}") if SDP_ENABLED and i == 0: sdp.broadcast_variables(model.variables, root_rank=0) sdp.broadcast_variables(opt.variables(), root_rank=0) if max_steps and i >= max_steps: break train_results = {"loss": loss_value.numpy()} return train_results def get_datasets(tokenizer, train_batch_size, eval_batch_size): # Load dataset train_dataset, test_dataset = load_dataset("imdb", split=["train", "test"]) # Preprocess train dataset train_dataset = train_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) train_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) train_features = { x: train_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_train_dataset = tf.data.Dataset.from_tensor_slices((train_features, train_dataset["label"])) # Preprocess test dataset test_dataset = test_dataset.map( lambda e: tokenizer(e["text"], truncation=True, padding="max_length"), batched=True ) test_dataset.set_format(type="tensorflow", columns=["input_ids", "attention_mask", "label"]) test_features = { x: test_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.model_max_length]) for x in ["input_ids", "attention_mask"] } tf_test_dataset = tf.data.Dataset.from_tensor_slices((test_features, test_dataset["label"])) if SDP_ENABLED: tf_train_dataset = tf_train_dataset.shard(sdp.size(), sdp.rank()) tf_test_dataset = tf_test_dataset.shard(sdp.size(), sdp.rank()) tf_train_dataset = tf_train_dataset.batch(train_batch_size, drop_remainder=True) tf_test_dataset = tf_test_dataset.batch(eval_batch_size, drop_remainder=True) return tf_train_dataset, tf_test_dataset if __name__ == "__main__": parser = argparse.ArgumentParser() # Hyperparameters sent by the client are passed as command-line arguments to the script. 
parser.add_argument("--epochs", type=int, default=3) parser.add_argument("--per_device_train_batch_size", type=int, default=16) parser.add_argument("--per_device_eval_batch_size", type=int, default=8) parser.add_argument("--model_name_or_path", type=str) parser.add_argument("--learning_rate", type=str, default=5e-5) parser.add_argument("--do_train", type=bool, default=True) parser.add_argument("--do_eval", type=bool, default=True) parser.add_argument("--output_dir", type=str) parser.add_argument("--max_steps", type=int, default=None) # Data, model, and output directories parser.add_argument("--output_data_dir", type=str, default=os.environ["SM_OUTPUT_DATA_DIR"]) parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"]) parser.add_argument("--n_gpus", type=str, default=os.environ["SM_NUM_GPUS"]) args, _ = parser.parse_known_args() # Set up logging logger = logging.getLogger(__name__) logging.basicConfig( level=logging.getLevelName("INFO"), handlers=[logging.StreamHandler(sys.stdout)], format="%(asctime)s - %(name)s - %(levelname)s - %(message)s", ) if SDP_ENABLED: sdp.init() gpus = tf.config.experimental.list_physical_devices("GPU") for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) if gpus: tf.config.experimental.set_visible_devices(gpus[sdp.local_rank()], "GPU") # Load model and tokenizer model = TFAutoModelForSequenceClassification.from_pretrained(args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) # get datasets tf_train_dataset, tf_test_dataset = get_datasets( tokenizer=tokenizer, train_batch_size=args.per_device_train_batch_size, eval_batch_size=args.per_device_eval_batch_size, ) # fine optimizer and loss optimizer = keras.optimizers.Adam(learning_rate=args.learning_rate) loss = keras.losses.SparseCategoricalCrossentropy(from_logits=True) metrics = [keras.metrics.SparseCategoricalAccuracy()] model.compile(optimizer=optimizer, loss=loss, metrics=metrics) # Training if args.do_train: # train_results = model.fit(tf_train_dataset, epochs=args.epochs, batch_size=args.train_batch_size) start_train_time = time.time() train_results = fit( model, loss, optimizer, tf_train_dataset, args.epochs, args.per_device_train_batch_size, max_steps=args.max_steps, ) end_train_time = time.time() - start_train_time logger.info("*** Train ***") logger.info(f"train_runtime = {end_train_time}") output_eval_file = os.path.join(args.output_dir, "train_results.txt") if not SDP_ENABLED or sdp.rank() == 0: with open(output_eval_file, "w") as writer: logger.info("***** Train results *****") logger.info(train_results) for key, value in train_results.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Evaluation if args.do_eval and (not SDP_ENABLED or sdp.rank() == 0): result = model.evaluate(tf_test_dataset, batch_size=args.per_device_eval_batch_size, return_dict=True) logger.info("*** Evaluate ***") output_eval_file = os.path.join(args.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") logger.info(result) for key, value in result.items(): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Save result if SDP_ENABLED: if sdp.rank() == 0: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) else: model.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir)
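# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of run_tf_dist.py above): the core GradientTape update that fit()
# performs, stripped of the SageMaker data-parallel (sdp) wrapping and the progress bar.
# `plain_train_step` is an assumed name used only for this example.
# ------------------------------------------------------------------------------------------------
import tensorflow as tf


def plain_train_step(model, loss_fn, optimizer, inputs, targets):
    with tf.GradientTape() as tape:
        outputs = model(inputs, training=True)
        # Sequence-classification heads return logits; the loss compares them with integer labels.
        loss_value = loss_fn(targets, outputs.logits)
    grads = tape.gradient(loss_value, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    return loss_value


# In the distributed version above, the tape is additionally wrapped in
# sdp.DistributedGradientTape(...) so gradients are all-reduced across workers before apply_gradients,
# and the model/optimizer variables are broadcast from rank 0 after the first step.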
0
mavonic_private_repos/transformers/tests
mavonic_private_repos/transformers/tests/fsdp/test_fsdp.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import unittest from copy import deepcopy from functools import partial from parameterized import parameterized import tests.trainer.test_trainer from tests.trainer.test_trainer import TrainerIntegrationCommon # noqa from transformers import is_torch_available from transformers.testing_utils import ( TestCasePlus, backend_device_count, execute_subprocess_async, mockenv_context, require_accelerate, require_fsdp, require_torch_accelerator, require_torch_multi_accelerator, slow, torch_device, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import FSDPOption, set_seed from transformers.utils import is_accelerate_available, is_torch_bf16_available_on_device if is_torch_available(): from transformers.pytorch_utils import is_torch_greater_or_equal_than_2_1 from transformers.trainer import FSDP_MODEL_NAME else: is_torch_greater_or_equal_than_2_1 = False # default torch.distributed port DEFAULT_MASTER_PORT = "10999" dtypes = ["fp16"] if is_torch_bf16_available_on_device(torch_device): dtypes += ["bf16"] sharding_strategies = ["full_shard", "shard_grad_op"] state_dict_types = ["FULL_STATE_DICT", "SHARDED_STATE_DICT"] set_seed(42) params = list(itertools.product(sharding_strategies, dtypes)) def get_master_port(real_launcher=False): """ When using a single gpu launcher emulation (i.e. not deepspeed or python -m torch.distributed) the issue is that once the port is tied it can't be used anywhere else outside of this process, since torch.dist doesn't free the port until the process exits. Therefore for the sake of being able to run both emulated launcher and normal launcher tests we need 2 distinct ports. This function will give the right port in the right context. For real launcher it'll give the base port, for emulated launcher it'll give the base port + 1. In both cases a string is returned. Args: `real_launcher`: whether a real launcher is going to be used, or the emulated one """ master_port_base = os.environ.get("DS_TEST_PORT", DEFAULT_MASTER_PORT) if not real_launcher: master_port_base = str(int(master_port_base) + 1) return master_port_base if is_torch_available(): from tests.trainer.test_trainer import ( # noqa RegressionModelConfig, RegressionPreTrainedModel, ) # hack to restore original logging level pre #21700 get_regression_trainer = partial(tests.trainer.test_trainer.get_regression_trainer, log_level="info") require_fsdp_version = require_fsdp if is_accelerate_available(): from accelerate.utils.constants import ( FSDP_PYTORCH_VERSION, FSDP_SHARDING_STRATEGY, ) require_fsdp_version = partial(require_fsdp, min_version=FSDP_PYTORCH_VERSION) def get_launcher(distributed=False, use_accelerate=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. 
for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, backend_device_count(torch_device)) if distributed else 1 master_port = get_master_port(real_launcher=True) if use_accelerate: return f"""accelerate launch --num_processes {num_gpus} --main_process_port {master_port} --use_fsdp --fsdp_auto_wrap_policy TRANSFORMER_BASED_WRAP --fsdp_state_dict_type SHARDED_STATE_DICT --fsdp_transformer_layer_cls_to_wrap BertLayer""".split() return f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split() def _parameterized_custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" @require_accelerate @require_torch_accelerator @require_fsdp_version class TrainerIntegrationFSDP(TestCasePlus, TrainerIntegrationCommon): def setUp(self): super().setUp() master_port = get_master_port(real_launcher=False) self.dist_env_1_gpu = { "MASTER_ADDR": "localhost", "MASTER_PORT": master_port, "RANK": "0", "LOCAL_RANK": "0", "WORLD_SIZE": "1", } self.fsdp_config = { "backward_prefetch": "backward_pre", "forward_prefetch": "False", "limit_all_gathers": "False", "use_orig_params": "True", "sync_module_states": "True", "cpu_ram_efficient_loading": "True", "activation_checkpointing": "False", "min_num_params": 1, } def tearDown(self): super().tearDown() @parameterized.expand(params, name_func=_parameterized_custom_name_func) def test_fsdp_config(self, sharding_strategy, dtype): output_dir = self.get_auto_remove_tmp_dir() kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "fsdp": f"{sharding_strategy} offload auto_wrap", "fsdp_config": self.fsdp_config, } kwargs[dtype] = True with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) self.assertEqual(trainer.args.fsdp[0], sharding_strategy) self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD) self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP) for k, v in trainer.args.fsdp_config.items(): self.assertEqual(v, self.fsdp_config[k]) self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) def test_fsdp_config_transformers_auto_wrap(self, sharding_strategy, dtype): output_dir = self.get_auto_remove_tmp_dir() fsdp_config = deepcopy(self.fsdp_config) del fsdp_config["min_num_params"] fsdp_config["transformer_layer_cls_to_wrap"] = "BertLayer" kwargs = { "output_dir": output_dir, "train_len": 128, "save_steps": 5, "learning_rate": 0.1, "fsdp": f"{sharding_strategy} offload auto_wrap", "fsdp_config": fsdp_config, } kwargs[dtype] = True prefix = "FSDP_" with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(**kwargs) self.assertEqual(trainer.args.fsdp[0], sharding_strategy) self.assertEqual(trainer.args.fsdp[1], FSDPOption.OFFLOAD) self.assertEqual(trainer.args.fsdp[2], FSDPOption.AUTO_WRAP) fsdp_sharding_strategy = ( str(FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1) if is_accelerate_available("0.26.0") else sharding_strategy.upper() ) self.assertEqual(os.environ[f"{prefix}SHARDING_STRATEGY"], fsdp_sharding_strategy) self.assertEqual(os.environ[f"{prefix}OFFLOAD_PARAMS"], "true") 
self.assertEqual(os.environ[f"{prefix}AUTO_WRAP_POLICY"], "TRANSFORMER_BASED_WRAP") self.assertEqual( os.environ[f"{prefix}TRANSFORMER_CLS_TO_WRAP"], ",".join(fsdp_config["transformer_layer_cls_to_wrap"]) ) self.assertEqual(os.environ[f"{prefix}BACKWARD_PREFETCH"], fsdp_config["backward_prefetch"].upper()) self.assertEqual(os.environ[f"{prefix}FORWARD_PREFETCH"], fsdp_config["forward_prefetch"]) self.assertEqual(os.environ[f"{prefix}USE_ORIG_PARAMS"], fsdp_config["use_orig_params"]) self.assertEqual(os.environ[f"{prefix}SYNC_MODULE_STATES"], fsdp_config["sync_module_states"]) self.assertEqual( os.environ[f"{prefix}CPU_RAM_EFFICIENT_LOADING"], fsdp_config["cpu_ram_efficient_loading"] ) self.assertEqual(os.environ.get("ACCELERATE_USE_FSDP", "false"), "true") @parameterized.expand(params, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_basic_run(self, sharding_strategy, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}"] fsdp_args = ["--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(dtypes) @require_torch_multi_accelerator @slow @unittest.skipIf(not is_torch_greater_or_equal_than_2_1, reason="This test on pytorch 2.0 takes 4 hours.") def test_basic_run_with_cpu_offload(self, dtype): launcher = get_launcher(distributed=True, use_accelerate=False) output_dir = self.get_auto_remove_tmp_dir() args = self.get_base_args(output_dir, 1, 50).split() + [f"--{dtype}", "--max_steps", "10"] fsdp_args = ["--fsdp", "full_shard auto_wrap offload", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"] script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] cmd = launcher + script + args + fsdp_args execute_subprocess_async(cmd, env=self.get_env()) @parameterized.expand(state_dict_types, name_func=_parameterized_custom_name_func) @require_torch_multi_accelerator @slow def test_training_and_can_resume_normally(self, state_dict_type): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) sharding_strategy = "full_shard" use_accelerate = state_dict_type == "SHARDED_STATE_DICT" launcher = get_launcher(True, use_accelerate=use_accelerate) args = self.get_base_args(output_dir, 2, 25).split() script = [f"{self.examples_dir_str}/pytorch/text-classification/run_glue.py"] logs = self.run_cmd_and_get_logs(use_accelerate, sharding_strategy, launcher, script, args, output_dir) # resume from ckpt checkpoint = os.path.join(output_dir, "checkpoint-115") resume_args = args + f"--resume_from_checkpoint {checkpoint}".split() is_fsdp_ckpt = os.path.isdir(checkpoint) and ( # this checks the FSDP state dict when `SHARDED_STATE_DICT` is used any( FSDP_MODEL_NAME in folder_name for folder_name in os.listdir(checkpoint) if os.path.isdir(os.path.join(checkpoint, folder_name)) ) # this checks the FSDP state dict when `FULL_STATE_DICT` is used or os.path.isfile(os.path.join(checkpoint, f"{FSDP_MODEL_NAME}.bin")) ) self.assertTrue(is_fsdp_ckpt) logs_resume = self.run_cmd_and_get_logs( use_accelerate, sharding_strategy, launcher, script, resume_args, output_dir ) for log, log1 in zip(logs, logs_resume): if "learning_rate" in log: self.assertAlmostEqual(log["learning_rate"], log1["learning_rate"], 
delta=1e-5) def run_cmd_and_get_logs(self, use_accelerate, sharding_strategy, launcher, script, args, output_dir): if not use_accelerate: fsdp_args = [ "--fsdp", f"{sharding_strategy} auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer", ] cmd = launcher + script + args + fsdp_args else: fsdp_config = f""" --fsdp_sharding_strategy {FSDP_SHARDING_STRATEGY.index(sharding_strategy.upper()) + 1} """.split() cmd = launcher + fsdp_config + script + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history return logs def get_base_args(self, output_dir, num_epochs, logging_steps): return f""" --model_name_or_path google-bert/bert-base-cased --task_name mrpc --output_dir {output_dir} --overwrite_output_dir --do_train --max_seq_length 128 --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs {num_epochs} --lr_scheduler_type cosine --logging_steps {logging_steps} --save_strategy epoch --do_eval --eval_strategy epoch --report_to none """
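# ------------------------------------------------------------------------------------------------
# Illustrative sketch (not part of test_fsdp.py above): the shape of the subprocess command the
# tests assemble from get_launcher(), the example script, and the FSDP flags. The concrete values
# below are examples only.
# ------------------------------------------------------------------------------------------------
num_gpus, master_port = 2, "10999"
launcher = f"torchrun --nnodes 1 --nproc-per-node {num_gpus} --master-port {master_port}".split()
script = ["examples/pytorch/text-classification/run_glue.py"]
fsdp_args = ["--fsdp", "full_shard auto_wrap", "--fsdp_transformer_layer_cls_to_wrap", "BertLayer"]
cmd = launcher + script + fsdp_args
# The resulting list is then handed to execute_subprocess_async(cmd, env=...) as in test_basic_run.
print(" ".join(cmd))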
0
mavonic_private_repos/transformers
mavonic_private_repos/transformers/examples/run_on_remote.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import shlex import runhouse as rh if __name__ == "__main__": # Refer to https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup for cloud access # setup instructions, if using on-demand hardware # If user passes --user <user> --host <host> --key_path <key_path> <example> <args>, fill them in as BYO cluster # If user passes --instance <instance> --provider <provider> <example> <args>, fill them in as on-demand cluster # Throw an error if user passes both BYO and on-demand cluster args # Otherwise, use default values parser = argparse.ArgumentParser() parser.add_argument("--user", type=str, default="ubuntu") parser.add_argument("--host", type=str, default="localhost") parser.add_argument("--key_path", type=str, default=None) parser.add_argument("--instance", type=str, default="V100:1") parser.add_argument("--provider", type=str, default="cheapest") parser.add_argument("--use_spot", type=bool, default=False) parser.add_argument("--example", type=str, default="pytorch/text-generation/run_generation.py") args, unknown = parser.parse_known_args() if args.host != "localhost": if args.instance != "V100:1" or args.provider != "cheapest": raise ValueError("Cannot specify both BYO and on-demand cluster args") cluster = rh.cluster( name="rh-cluster", ips=[args.host], ssh_creds={"ssh_user": args.user, "ssh_private_key": args.key_path} ) else: cluster = rh.cluster( name="rh-cluster", instance_type=args.instance, provider=args.provider, use_spot=args.use_spot ) example_dir = args.example.rsplit("/", 1)[0] # Set up remote environment cluster.install_packages(["pip:./"]) # Installs transformers from local source # Note transformers is copied into the home directory on the remote machine, so we can install from there cluster.run([f"pip install -r transformers/examples/{example_dir}/requirements.txt"]) cluster.run(["pip install torch --upgrade --extra-index-url https://download.pytorch.org/whl/cu117"]) # Run example. You can bypass the CLI wrapper and paste your own code here. cluster.run([f'python transformers/examples/{args.example} {" ".join(shlex.quote(arg) for arg in unknown)}']) # Alternatively, we can just import and run a training function (especially if there's no wrapper CLI): # from my_script... import train # reqs = ['pip:./', 'torch', 'datasets', 'accelerate', 'evaluate', 'tqdm', 'scipy', 'scikit-learn', 'tensorboard'] # launch_train_gpu = rh.function(fn=train, # system=gpu, # reqs=reqs, # name='train_bert_glue') # # We can pass in arguments just like we would to a function: # launch_train_gpu(num_epochs = 3, lr = 2e-5, seed = 42, batch_size = 16 # stream_logs=True)
0
mavonic_private_repos/transformers
mavonic_private_repos/transformers/examples/README.md
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Examples We host a wide range of example scripts for multiple learning frameworks. Simply choose your favorite: [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow), [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch) or [JAX/Flax](https://github.com/huggingface/transformers/tree/main/examples/flax). We also have some [research projects](https://github.com/huggingface/transformers/tree/main/examples/research_projects), as well as some [legacy examples](https://github.com/huggingface/transformers/tree/main/examples/legacy). Note that unlike the main examples these are not actively maintained, and may require specific older versions of dependencies in order to run. While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the-box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data, allowing you to tweak and edit them as required. Please discuss on the [forum](https://discuss.huggingface.co/) or in an [issue](https://github.com/huggingface/transformers/issues) a feature you would like to implement in an example before submitting a PR; we welcome bug fixes, but since we want to keep the examples as simple as possible it's unlikely that we will merge a pull request adding more functionality at the cost of readability. ## Important note **Important** To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment: ```bash git clone https://github.com/huggingface/transformers cd transformers pip install . 
``` Then cd in the example folder of your choice and run ```bash pip install -r requirements.txt ``` To browse the examples corresponding to released versions of 🤗 Transformers, click on the line below and then on your desired version of the library: <details> <summary>Examples for older versions of 🤗 Transformers</summary> <ul> <li><a href="https://github.com/huggingface/transformers/tree/v4.21.0/examples">v4.21.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.20.1/examples">v4.20.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.19.4/examples">v4.19.4</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.18.0/examples">v4.18.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.17.0/examples">v4.17.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.16.2/examples">v4.16.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.15.0/examples">v4.15.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.14.1/examples">v4.14.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.13.0/examples">v4.13.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.12.5/examples">v4.12.5</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.11.3/examples">v4.11.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.10.3/examples">v4.10.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.9.2/examples">v4.9.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.8.2/examples">v4.8.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.7.0/examples">v4.7.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.6.1/examples">v4.6.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.5.1/examples">v4.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.4.2/examples">v4.4.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.3.3/examples">v4.3.3</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.2.2/examples">v4.2.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.1.1/examples">v4.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v4.0.1/examples">v4.0.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.5.1/examples">v3.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.4.0/examples">v3.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.3.1/examples">v3.3.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.2.0/examples">v3.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.1.0/examples">v3.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v3.0.2/examples">v3.0.2</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.11.0/examples">v2.11.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.10.0/examples">v2.10.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.9.1/examples">v2.9.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.8.0/examples">v2.8.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.7.0/examples">v2.7.0</a></li> <li><a 
href="https://github.com/huggingface/transformers/tree/v2.6.0/examples">v2.6.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.5.1/examples">v2.5.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.4.0/examples">v2.4.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.3.0/examples">v2.3.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.2.0/examples">v2.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.1.0/examples">v2.1.1</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v2.0.0/examples">v2.0.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.2.0/examples">v1.2.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.1.0/examples">v1.1.0</a></li> <li><a href="https://github.com/huggingface/transformers/tree/v1.0.0/examples">v1.0.0</a></li> </ul> </details> Alternatively, you can switch your cloned 🤗 Transformers to a specific version (for instance with v3.5.1) with ```bash git checkout tags/v3.5.1 ``` and run the example command as usual afterward. ## Running the Examples on Remote Hardware with Auto-Setup [run_on_remote.py](./run_on_remote.py) is a script that launches any example on remote self-hosted hardware, with automatic hardware and environment setup. It uses [Runhouse](https://github.com/run-house/runhouse) to launch on self-hosted hardware (e.g. in your own cloud account or on-premise cluster) but there are other options for running remotely as well. You can easily customize the example used, command line arguments, dependencies, and type of compute hardware, and then run the script to automatically launch the example. You can refer to [hardware setup](https://runhouse-docs.readthedocs-hosted.com/en/latest/api/python/cluster.html#hardware-setup) for more information about hardware and dependency setup with Runhouse, or this [Colab tutorial](https://colab.research.google.com/drive/1sh_aNQzJX5BKAdNeXthTNGxKz7sM9VPc) for a more in-depth walkthrough. You can run the script with the following commands: ```bash # First install runhouse: pip install runhouse # For an on-demand V100 with whichever cloud provider you have configured: python run_on_remote.py \ --example pytorch/text-generation/run_generation.py \ --model_type=gpt2 \ --model_name_or_path=openai-community/gpt2 \ --prompt "I am a language model and" # For byo (bring your own) cluster: python run_on_remote.py --host <cluster_ip> --user <ssh_user> --key_path <ssh_key_path> \ --example <example> <args> # For on-demand instances python run_on_remote.py --instance <instance> --provider <provider> \ --example <example> <args> ``` You can also adapt the script to your own needs.
0
mavonic_private_repos/transformers/examples
mavonic_private_repos/transformers/examples/research_projects/README.md
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Research projects This folder contains various research projects using 🤗 Transformers. They are not maintained and require a specific version of 🤗 Transformers that is indicated in the requirements file of each folder. Updating them to the most recent version of the library will require some work. To use any of them, just run the command ```bash pip install -r requirements.txt ``` inside the folder of your choice. If you need help with any of those, contact the author(s), indicated at the top of the `README` of each folder.
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/xtreme-s/requirements.txt
datasets >= 1.18.0
torch >= 1.5
torchaudio
librosa
jiwer
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/xtreme-s/run_xtreme_s.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """ Fine-tuning a 🤗 Transformers pretrained speech model on the XTREME-S benchmark tasks""" import json import logging import os import re import sys from collections import OrderedDict, defaultdict from dataclasses import dataclass, field from typing import Dict, List, Optional, Union import datasets import numpy as np import torch from datasets import DatasetDict, load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, AutoModelForCTC, AutoModelForSpeechSeq2Seq, AutoProcessor, AutoTokenizer, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, Trainer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.18.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") logger = logging.getLogger(__name__) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) TASK_TO_TARGET_COLUMN_NAME = { "fleurs-asr": "transcription", "fleurs-lang_id": "lang_id", "mls": "transcription", "voxpopuli": "transcription", "covost2": "translation", "minds14": "intent_class", "babel": "transcription", } @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"}, ) cache_dir: Optional[str] = field( default=None, metadata={ "help": "Where do you want to store the pretrained models and datasets downloaded from huggingface.co" }, ) freeze_feature_encoder: bool = field( default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) attention_dropout: float = field( default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."} ) activation_dropout: float = field( default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} ) feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."}) hidden_dropout: float = field( default=0.0, metadata={ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." 
}, ) final_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for the final projection layer."}, ) mask_time_prob: float = field( default=0.05, metadata={ "help": ( "Probability of each feature vector along the time axis to be chosen as the start of the vector " "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature " "vectors will be masked along the time axis." ) }, ) mask_time_length: int = field( default=10, metadata={"help": "Length of vector span to mask along the time axis."}, ) mask_feature_prob: float = field( default=0.0, metadata={ "help": ( "Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan" " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature" " bins will be masked along the time axis." ) }, ) mask_feature_length: int = field( default=10, metadata={"help": "Length of vector span to mask along the feature axis."}, ) layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."}) ctc_zero_infinity: bool = field( default=False, metadata={"help": "Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`."}, ) ctc_loss_reduction: Optional[str] = field( default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default="google/xtreme_s", metadata={"help": "The name of the dataset to use (via the datasets library). Defaults to 'google/xtreme_s'"}, ) task: str = field( default=None, metadata={ "help": ( "The task name of the benchmark to use (via the datasets library). Should be on of: " "'fleurs-asr', 'mls', 'voxpopuli', 'covost2', 'minds14', 'fleurs-lang_id', 'babel'." ) }, ) language: str = field( default="all", metadata={"help": "The language id as defined in the datasets config name or `all` for all languages."}, ) language_group: str = field( default=None, metadata={ "help": ( "The language group to select a subset of languages to train on. " "This option is only used the 'fleurs-asr' task. Should be one of: " "'western_european_we', 'eastern_european_ee', 'central_asia_middle_north_african_cmn', " "'sub_saharan_african_ssa', 'south_asian_sa', 'south_east_asian_sea', 'chinese_japanase_korean_cjk'." ) }, ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train'" }, ) eval_split_name: str = field( default="validation", metadata={ "help": ( "The name of the evaluation dataset split to use (via the datasets library). Defaults to 'validation'" ) }, ) predict_split_name: str = field( default="test", metadata={ "help": "The name of the prediction dataset split to use (via the datasets library). Defaults to 'test'" }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) target_column_name: str = field( default=None, metadata={ "help": ( "The name of the dataset column containing the target data (transcription/translation/label). If None," " the name will be inferred from the task. Defaults to None." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) chars_to_ignore: Optional[List[str]] = list_field( default=', ? . ! - ; : " “ % ‘ ” �'.split(" "), metadata={"help": "A list of characters to remove from the transcripts."}, ) max_duration_in_seconds: float = field( default=30.0, metadata={ "help": ( "Filter audio files that are longer than `max_duration_in_seconds` seconds to" " 'max_duration_in_seconds`" ) }, ) min_duration_in_seconds: float = field( default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} ) preprocessing_only: bool = field( default=False, metadata={ "help": ( "Whether to only do data preprocessing and skip training. This is especially useful when data" " preprocessing errors out in distributed training due to timeout. In this case, one should run the" " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" " can consequently be loaded in distributed training" ) }, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "If :obj:`True`, will use the token generated when running" ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) unk_token: str = field( default="[UNK]", metadata={"help": "The unk token for the tokenizer"}, ) pad_token: str = field( default="[PAD]", metadata={"help": "The padding token for the tokenizer"}, ) word_delimiter_token: str = field( default="|", metadata={"help": "The word delimiter token for the tokenizer"}, ) phoneme_language: Optional[str] = field( default=None, metadata={ "help": ( "The target language that should be used be" " passed to the tokenizer for tokenization. Note that" " this is only relevant if the model classifies the" " input audio to a sequence of phoneme sequences." ) }, ) per_lang_metrics: bool = field( default=True, metadata={ "help": ( "If `True`, compute the test metrics separately for each language, and average the results. " "If `False` compute the average test metrics in a single pass for all languages at once." 
) }, ) @dataclass class SpeechDataCollatorWithPadding: processor: AutoProcessor decoder_start_token_id: Optional[int] = None padding: Union[bool, str] = "longest" pad_labels: Optional[int] = True pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lengths and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) if self.pad_labels: label_features = [{"input_ids": feature["labels"]} for feature in features] labels_batch = self.processor.pad( labels=label_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if ( self.decoder_start_token_id is not None and (labels[:, 0] == self.decoder_start_token_id).all().cpu().item() ): labels = labels[:, 1:] batch["labels"] = labels else: batch["labels"] = torch.tensor([feature["labels"] for feature in features]) return batch def create_vocabulary_from_data( datasets: DatasetDict, word_delimiter_token: Optional[str] = None, unk_token: Optional[str] = None, pad_token: Optional[str] = None, ): # Given training and test labels create vocabulary def extract_all_chars(batch): all_text = " ".join(batch["target_text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]} vocabs = datasets.map( extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=datasets["train"].column_names, ) # take union of all unique characters in each dataset vocab_set = ( (set(vocabs["train"]["vocab"][0]) if "train" in vocabs else set()) | (set(vocabs["eval"]["vocab"][0]) if "eval" in vocabs else set()) | (set(vocabs["predict"]["vocab"][0]) if "predict" in vocabs else set()) ) vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))} # replace white space with delimiter token if word_delimiter_token is not None: vocab_dict[word_delimiter_token] = vocab_dict[" "] del vocab_dict[" "] # add unk and pad token if unk_token is not None: vocab_dict[unk_token] = len(vocab_dict) if pad_token is not None: vocab_dict[pad_token] = len(vocab_dict) return vocab_dict def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # 1. First, let's load the dataset raw_datasets = DatasetDict() task_name = data_args.task lang_id = data_args.language if task_name is None: raise ValueError( "Set --task should be set to '<xtreme_s_task>' (e.g. 'fleurs-asr', 'mls', 'covost2', 'minds14') " ) if lang_id is None: raise ValueError( "Set --language should be set to the language id of the sub dataset " "config to be used (e.g. 'pl', 'en.tr', 'fr-FR') or 'all'" " for multi-lingual fine-tuning." ) if data_args.language_group is not None: if data_args.task != "fleurs-asr": raise ValueError("--language_group should only be used with --task=fleurs-asr") if data_args.language != "all": raise ValueError("--language_group should only be used with --language=all") if data_args.target_column_name is None: target_column_name = TASK_TO_TARGET_COLUMN_NAME[task_name] else: target_column_name = data_args.target_column_name # here we differentiate between tasks with text as the target and classification tasks is_text_target = target_column_name in ("transcription", "translation") config_name = ".".join([task_name.split("-")[0], lang_id]) if training_args.do_train: raw_datasets["train"] = load_dataset( data_args.dataset_name, config_name, split=data_args.train_split_name, token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'." " Make sure to set `--audio_column_name` to the correct audio column - one of" f" {', '.join(raw_datasets['train'].column_names)}." ) if target_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--target_column_name {target_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--target_column_name` to the correct text column - one of " f"{', '.join(raw_datasets['train'].column_names)}." 
) if data_args.max_train_samples is not None: raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) if training_args.do_eval: raw_datasets["eval"] = load_dataset( data_args.dataset_name, config_name, split=data_args.eval_split_name, token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.max_eval_samples is not None: raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) if training_args.do_predict: raw_datasets["predict"] = load_dataset( data_args.dataset_name, config_name, split=data_args.predict_split_name, token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.max_predict_samples is not None: raw_datasets["predict"] = raw_datasets["predict"].select(range(data_args.max_predict_samples)) lang_list = next(iter(raw_datasets.values())).features["lang_id"].names if not is_text_target: label_list = next(iter(raw_datasets.values())).features[target_column_name].names num_labels = len(label_list) num_workers = data_args.preprocessing_num_workers lang_group = data_args.language_group if lang_group is not None: with training_args.main_process_first(desc="language group filter"): lang_group_id = next(iter(raw_datasets.values())).features["lang_group_id"].str2int(lang_group) raw_datasets = raw_datasets.filter( lambda lang_group: lang_group == lang_group_id, num_proc=num_workers, input_columns=["lang_group_id"], ) # 2. We remove some special characters from the datasets # that make training complicated and do not help in transcribing the speech # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic # that could be easily picked up by the model chars_to_ignore_regex = ( f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None ) def remove_special_characters(batch): if chars_to_ignore_regex is not None: batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " " else: batch["target_text"] = batch[target_column_name].lower() + " " return batch if is_text_target: with training_args.main_process_first(desc="dataset map special characters removal"): raw_datasets = raw_datasets.map( remove_special_characters, remove_columns=[target_column_name], desc="remove special characters from datasets", ) # save special tokens for tokenizer word_delimiter_token = data_args.word_delimiter_token unk_token = data_args.unk_token pad_token = data_args.pad_token # 3. Next, let's load the config as we might need it to create # the tokenizer config = AutoConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) if is_text_target: # 4. 
(Optional, for ASR and translation) If no tokenizer file is defined, # we create the vocabulary of the model by extracting all unique characters from # the training and evaluation datasets # We need to make sure that only first rank saves vocabulary # make sure all processes wait until vocab is created tokenizer_name_or_path = model_args.tokenizer_name_or_path tokenizer_kwargs = {} if tokenizer_name_or_path is None: # save vocab in training output dir tokenizer_name_or_path = training_args.output_dir vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): if training_args.overwrite_output_dir and os.path.isfile(vocab_file): os.remove(vocab_file) with training_args.main_process_first(desc="dataset map vocabulary creation"): if not os.path.isfile(vocab_file): os.makedirs(tokenizer_name_or_path, exist_ok=True) vocab_dict = create_vocabulary_from_data( raw_datasets, word_delimiter_token=word_delimiter_token, unk_token=unk_token, pad_token=pad_token, ) # save vocab dict to be loaded into tokenizer with open(vocab_file, "w") as file: json.dump(vocab_dict, file) # if tokenizer has just been created # it is defined by `tokenizer_class` if present in config else by `model_type` if not config.is_encoder_decoder: tokenizer_kwargs = { "config": config if config.tokenizer_class is not None else None, "tokenizer_type": config.model_type if config.tokenizer_class is None else None, "unk_token": unk_token, "pad_token": pad_token, "word_delimiter_token": word_delimiter_token, } else: tokenizer_kwargs = {} # 5. Now we can instantiate the feature extractor, tokenizer and model # Note for distributed training, the .from_pretrained methods guarantee that only # one local process can concurrently download model & vocab. # load feature_extractor and tokenizer if is_text_target: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, token=data_args.use_auth_token, **tokenizer_kwargs, ) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, token=data_args.use_auth_token ) # adapt config # (speech translation requires pre-configured seq2seq models) if task_name != "covost2": config.update( { "feat_proj_dropout": model_args.feat_proj_dropout, "attention_dropout": model_args.attention_dropout, "hidden_dropout": model_args.hidden_dropout, "final_dropout": model_args.final_dropout, "mask_time_prob": model_args.mask_time_prob, "mask_time_length": model_args.mask_time_length, "mask_feature_prob": model_args.mask_feature_prob, "mask_feature_length": model_args.mask_feature_length, "gradient_checkpointing": training_args.gradient_checkpointing, "layerdrop": model_args.layerdrop, "ctc_zero_infinity": model_args.ctc_zero_infinity, "ctc_loss_reduction": model_args.ctc_loss_reduction, "activation_dropout": model_args.activation_dropout, } ) if training_args.do_train: if is_text_target: config.pad_token_id = tokenizer.pad_token_id config.vocab_size = len(tokenizer) else: label_to_id = {v: i for i, v in enumerate(label_list)} config.label2id = label_to_id config.id2label = {id: label for label, id in label_to_id.items()} config.num_labels = num_labels # create model if target_column_name == "transcription": model = AutoModelForCTC.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, token=data_args.use_auth_token, ) elif config.is_encoder_decoder: model = AutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, 
config=config, token=data_args.use_auth_token, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") else: model = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, token=data_args.use_auth_token, ) # freeze encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() # 6. Now we preprocess the datasets including loading the audio, resampling and normalization # Thankfully, `datasets` takes care of automatically loading and resampling the audio, # so that we just need to set the correct target sampling rate and normalize the input # via the `feature_extractor` # make sure that dataset decodes audio with correct sampling rate dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate if dataset_sampling_rate != feature_extractor.sampling_rate: raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # derive max & min input length for sample rate & max duration max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate audio_column_name = data_args.audio_column_name # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification phoneme_language = data_args.phoneme_language # Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. def prepare_dataset(batch): # load audio sample = batch[audio_column_name] inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) batch["input_values"] = inputs.input_values[0] batch["length"] = len(batch["input_values"]) # encode targets additional_kwargs = {} if phoneme_language is not None: additional_kwargs["phonemizer_lang"] = phoneme_language if is_text_target: batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids else: batch["labels"] = batch[target_column_name] batch["lang"] = batch["lang_id"] return batch with training_args.main_process_first(desc="dataset map preprocessing"): vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, desc="preprocess datasets", ) if training_args.do_train: def is_audio_in_length_range(length): return length > min_input_length and length < max_input_length # filter data that is shorter than min_input_length vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, num_proc=num_workers, input_columns=["length"], ) # 7. Next, we can prepare for the training step. # Let's use the appropriate XTREME-S evaluation metric, # instantiate a data collator and the trainer # Define evaluation metrics during training, *i.e.* word error rate, character error rate eval_metric = load_metric("xtreme_s", task_name) # for large datasets it is advised to run the preprocessing on a # single machine first with ``args.preprocessing_only`` since there will mostly likely # be a timeout when running the script in distributed mode. # In a second step ``args.preprocessing_only`` can then be set to `False` to load the # cached dataset if data_args.preprocessing_only: logger.info(f"Data preprocessing finished. 
Files cached at {vectorized_datasets.cache_files}") return def asr_logits_argmax(logits, labels): return logits.argmax(dim=-1) def compute_asr_metric(pred): pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(pred.predictions) # we do not want to group tokens when computing the metrics label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) metric = eval_metric.compute(predictions=pred_str, references=label_str) return metric def compute_classification_metric(pred): pred_ids = np.argmax(pred.predictions, axis=1) metric = eval_metric.compute(predictions=pred_ids, references=pred.label_ids) return metric # Now save everything to be able to create a single processor later if is_main_process(training_args.local_rank): # save feature extractor, tokenizer and config feature_extractor.save_pretrained(training_args.output_dir) if is_text_target: tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) # wait until configs are saved in the main process before loading the processor if training_args.local_rank != -1: torch.distributed.barrier() if is_text_target: processor = AutoProcessor.from_pretrained(training_args.output_dir) else: processor = AutoFeatureExtractor.from_pretrained(training_args.output_dir) # Instantiate custom data collator data_collator = SpeechDataCollatorWithPadding(processor=processor, pad_labels=is_text_target) # Initialize Trainer if target_column_name == "translation": trainer = Seq2SeqTrainer( model=model, data_collator=data_collator, args=training_args, preprocess_logits_for_metrics=asr_logits_argmax if training_args.predict_with_generate else None, compute_metrics=compute_asr_metric if training_args.predict_with_generate else None, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, ) else: trainer = Trainer( model=model, data_collator=data_collator, args=training_args, preprocess_logits_for_metrics=asr_logits_argmax if is_text_target else None, compute_metrics=compute_asr_metric if is_text_target else compute_classification_metric, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, ) # 8. 
Finally, we can start training # Training if training_args.do_train: # use last checkpoint if exist if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(vectorized_datasets["train"]) ) metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"])) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation on the test set results = {} if training_args.do_predict: logger.info(f"*** Evaluating on the `{data_args.predict_split_name}` set ***") if data_args.per_lang_metrics: # separate the `test` dataset into language-specific subsets and compute metrics for each of them metrics = {} average_metrics = defaultdict(list) for lang_id in range(len(lang_list)): lang_name = lang_list[lang_id] with training_args.main_process_first(desc="per-language dataset filter"): lang_dataset = vectorized_datasets["predict"].filter( lambda lang: lang == lang_id, num_proc=num_workers, input_columns=["lang"], ) lang_metrics = trainer.evaluate(lang_dataset) redundant_metrics = ["eval_runtime", "eval_samples_per_second", "eval_steps_per_second", "eval_epoch"] for metric_name, value in lang_metrics.items(): average_metrics[metric_name].append(value) if metric_name not in redundant_metrics: metrics[f"{metric_name}_{lang_name}"] = value for metric_name, value in average_metrics.items(): metrics[metric_name] = np.mean(value) else: metrics = trainer.evaluate(vectorized_datasets["predict"]) max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(vectorized_datasets["predict"]) ) metrics["predict_samples"] = min(max_predict_samples, len(vectorized_datasets["predict"])) # make sure that the `predict` metrics end up in the log history for the model card trainer.log(OrderedDict(sorted(metrics.items()))) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": task_name, "tags": [task_name, data_args.dataset_name], "dataset_args": ( f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:" f" {data_args.eval_split_name}, Predict split: {data_args.predict_split_name}" ), "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}", "language": data_args.language, } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results if __name__ == "__main__": main()
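
To make the label handling in `SpeechDataCollatorWithPadding` above concrete: padded label positions are overwritten with `-100`, the padding index that the model's loss computation ignores. The following is a minimal standalone sketch of that masking step, using toy tensors chosen purely for illustration (it is not part of the original script):

```python
import torch

# two label sequences of different lengths, padded to a common length by the processor
padded_labels = torch.tensor([[5, 9, 2, 0, 0],
                              [7, 3, 4, 8, 1]])
attention_mask = torch.tensor([[1, 1, 1, 0, 0],
                               [1, 1, 1, 1, 1]])

# positions where attention_mask != 1 are padding; the loss skips label id -100
labels = padded_labels.masked_fill(attention_mask.ne(1), -100)
print(labels)
# tensor([[   5,    9,    2, -100, -100],
#         [   7,    3,    4,    8,    1]])
```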
mavonic_private_repos/transformers/examples/research_projects/xtreme-s/README.md
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # XTREME-S benchmark examples *Maintainers: [Anton Lozhkov](https://github.com/anton-l) and [Patrick von Platen](https://github.com/patrickvonplaten)* The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is a benchmark designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers XX typologically diverse languages and seven downstream tasks grouped in four families: speech recognition, translation, classification and retrieval. XTREME-S covers speech recognition with Fleurs, Multilingual LibriSpeech (MLS) and VoxPopuli, speech translation with CoVoST-2, speech classification with LangID (Fleurs) and intent classification (MInds-14) and finally speech(-text) retrieval with Fleurs. Each of the tasks covers a subset of the 102 languages included in XTREME-S (shown here with their ISO 3166-1 codes): afr, amh, ara, asm, ast, azj, bel, ben, bos, cat, ceb, ces, cmn, cym, dan, deu, ell, eng, spa, est, fas, ful, fin, tgl, fra, gle, glg, guj, hau, heb, hin, hrv, hun, hye, ind, ibo, isl, ita, jpn, jav, kat, kam, kea, kaz, khm, kan, kor, ckb, kir, ltz, lug, lin, lao, lit, luo, lav, mri, mkd, mal, mon, mar, msa, mlt, mya, nob, npi, nld, nso, nya, oci, orm, ory, pan, pol, pus, por, ron, rus, bul, snd, slk, slv, sna, som, srp, swe, swh, tam, tel, tgk, tha, tur, ukr, umb, urd, uzb, vie, wol, xho, yor, yue and zul. Paper: [XTREME-S: Evaluating Cross-lingual Speech Representations](https://arxiv.org/abs/2203.10752) Dataset: [https://huggingface.co/datasets/google/xtreme_s](https://huggingface.co/datasets/google/xtreme_s) ## Fine-tuning for the XTREME-S tasks Based on the [`run_xtreme_s.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/xtreme-s/run_xtreme_s.py) script. This script can fine-tune any of the pretrained speech models on the [hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition) on the [XTREME-S dataset](https://huggingface.co/datasets/google/xtreme_s) tasks. XTREME-S is made up of 7 different tasks. Here is how to run the script on each of them: ```bash export TASK_NAME=mls.all python run_xtreme_s.py \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --task="${TASK_NAME}" \ --output_dir="xtreme_s_xlsr_${TASK_NAME}" \ --num_train_epochs=100 \ --per_device_train_batch_size=32 \ --learning_rate="3e-4" \ --target_column_name="transcription" \ --save_steps=500 \ --eval_steps=500 \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --push_to_hub ``` where `TASK_NAME` can be one of: `mls, voxpopuli, covost2, fleurs-asr, fleurs-lang_id, minds14`. We get the following results on the test set of the benchmark's datasets. 
The corresponding training commands for each dataset are given in the sections below: | Task | Dataset | Result | Fine-tuned model & logs | Training time | GPUs | |-----------------------|-----------|-----------------------|--------------------------------------------------------------------|---------------|--------| | Speech Recognition | MLS | 30.33 WER | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_mls/) | 18:47:25 | 8xV100 | | Speech Recognition | VoxPopuli | - | - | - | - | | Speech Recognition | FLEURS | - | - | - | - | | Speech Translation | CoVoST-2 | - | - | - | - | | Speech Classification | Minds-14 | 90.15 F1 / 90.33 Acc. | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_minds14/) | 2:54:21 | 2xA100 | | Speech Classification | FLEURS | - | - | - | - | | Speech Retrieval | FLEURS | - | - | - | - | ### Speech Recognition with MLS The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#multilingual-librispeech-mls) using 8 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=8 \ run_xtreme_s.py \ --task="mls" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_mls" \ --overwrite_output_dir \ --num_train_epochs=100 \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=1 \ --gradient_accumulation_steps=2 \ --learning_rate="3e-4" \ --warmup_steps=3000 \ --eval_strategy="steps" \ --max_duration_in_seconds=20 \ --save_steps=500 \ --eval_steps=500 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="wer" \ --greater_is_better=False \ --load_best_model_at_end \ --push_to_hub ``` On 8 V100 GPUs, this script should run in ~19 hours and yield a cross-entropy loss of **0.6215** and word error rate of **30.33** ### Speech Classification with Minds-14 The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#intent-classification---minds-14) using 2 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=2 \ run_xtreme_s.py \ --task="minds14" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_minds14" \ --overwrite_output_dir \ --num_train_epochs=50 \ --per_device_train_batch_size=32 \ --per_device_eval_batch_size=8 \ --gradient_accumulation_steps=1 \ --learning_rate="3e-4" \ --warmup_steps=1500 \ --eval_strategy="steps" \ --max_duration_in_seconds=30 \ --save_steps=200 \ --eval_steps=200 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="f1" \ --greater_is_better=True \ --load_best_model_at_end \ --push_to_hub ``` On 2 A100 GPUs, this script should run in ~5 hours and yield a cross-entropy loss of **0.4119** and F1 score of **90.15**
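
For a quick sanity check of a fine-tuned ASR checkpoint, a minimal inference sketch along the following lines can be used. The checkpoint id comes from the results table above; the `mls.pl` config name, the availability of a saved tokenizer/feature extractor on that checkpoint, and the exact `datasets` loading behavior are assumptions, so treat this as a sketch rather than a verified recipe:

```python
import torch
from datasets import load_dataset
from transformers import AutoModelForCTC, AutoProcessor

model_id = "anton-l/xtreme_s_xlsr_300m_mls"  # checkpoint from the results table above (assumed to ship a processor)
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForCTC.from_pretrained(model_id)

# load one test sample; the "mls.pl" config name is illustrative
ds = load_dataset("google/xtreme_s", "mls.pl", split="test")
sample = ds[0]["audio"]

inputs = processor(sample["array"], sampling_rate=sample["sampling_rate"], return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# greedy CTC decoding
print(processor.batch_decode(logits.argmax(dim=-1))[0])
```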
mavonic_private_repos/transformers/examples/research_projects/tapex/run_wikisql_with_tapex.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for tapex on table-based question answering tasks. Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py """ import logging import os import sys from collections import defaultdict from copy import deepcopy from dataclasses import dataclass, field from functools import partial from typing import List, Optional import nltk # Here to have a nice missing dependency error message early on import numpy as np import pandas as pd from datasets import load_dataset from filelock import FileLock from wikisql_utils import _TYPE_CONVERTER, retrieve_wikisql_query_answer_tapas import transformers from transformers import ( AutoConfig, BartForConditionalGeneration, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, TapexTokenizer, set_seed, ) from transformers.file_utils import is_offline_mode from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": ( "Pretrained tokenizer name or path if not the same as model_name. " "By default we use BART-large tokenizer for TAPEX-large." ) }, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default="wikisql", metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus # we should disable it here to avoid problematic generation config.no_repeat_ngram_size = 0 config.max_length = 1024 config.early_stopping = False # load tapex tokenizer tokenizer = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, add_prefix_space=True, ) # load Bart based Tapex model (default tapex-large) model = BartForConditionalGeneration.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = datasets["train"].column_names elif training_args.do_eval: column_names = datasets["validation"].column_names elif training_args.do_predict: column_names = datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) def preprocess_tableqa_function(examples, is_training=False): """ The is_training FLAG is used to identify if we could use the supervision to truncate the table content if it is required. 
""" # this function is specific for WikiSQL since the util function need the data structure # to retrieve the WikiSQL answer for each question def _convert_table_types(_table): """Runs the type converter over the table cells.""" ret_table = deepcopy(_table) types = ret_table["types"] ret_table["real_rows"] = ret_table["rows"] typed_rows = [] for row in ret_table["rows"]: typed_row = [] for column, cell_value in enumerate(row): typed_row.append(_TYPE_CONVERTER[types[column]](cell_value)) typed_rows.append(typed_row) ret_table["rows"] = typed_rows return ret_table questions = [question.lower() for question in examples["question"]] example_tables = examples["table"] example_sqls = examples["sql"] tables = [ pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) for example_table in example_tables ] # using tapas utils to obtain wikisql answer answers = [] for example_sql, example_table in zip(example_sqls, example_tables): tapas_table = _convert_table_types(example_table) answer_list: List[str] = retrieve_wikisql_query_answer_tapas(tapas_table, example_sql) # you can choose other delimiters to split each answer answers.append(answer_list) # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to # truncate large tables in the train set! if is_training: model_inputs = tokenizer( table=tables, query=questions, answer=answers, max_length=data_args.max_source_length, padding=padding, truncation=True, ) else: model_inputs = tokenizer( table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True ) labels = tokenizer( answer=[", ".join(answer) for answer in answers], max_length=max_target_length, padding=padding, truncation=True, ) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # in training, we can use the answer as extra information to truncate large tables preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( preprocess_tableqa_function_training, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = datasets["test"] if data_args.max_predict_samples is not None: predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) predict_dataset = predict_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True) if data_args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. 
labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) delimiter = ", " # define example evaluation def evaluate_example(predict_str: str, ground_str: str): predict_spans = predict_str.split(delimiter) ground_spans = ground_str.split(delimiter) predict_values = defaultdict(lambda: 0) ground_values = defaultdict(lambda: 0) for span in predict_spans: try: predict_values[float(span)] += 1 except ValueError: predict_values[span.strip()] += 1 for span in ground_spans: try: ground_values[float(span)] += 1 except ValueError: ground_values[span.strip()] += 1 is_correct = predict_values == ground_values return is_correct def get_denotation_accuracy(predictions: List[str], references: List[str]): assert len(predictions) == len(references) correct_num = 0 for predict_str, ground_str in zip(predictions, references): is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) if is_correct: correct_num += 1 return correct_num / len(predictions) accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) result = {"denotation_accuracy": accuracy} return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate( max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" ) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = tokenizer.batch_decode( predict_results.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] 
output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") with open(output_prediction_file, "w") as writer: writer.write("\n".join(predictions)) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
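
The `compute_metrics` function above scores WikiSQL predictions with denotation accuracy: each answer string is split on the delimiter and compared as a multiset of spans, with numeric spans normalized through `float` so that, for example, "3.0" matches "3". A small self-contained illustration of that comparison logic (hypothetical helper name, toy inputs; not part of the training script):

```python
from collections import defaultdict


def _span_counts(answer_str: str, delimiter: str = ", "):
    """Count answer spans, treating numeric spans as floats so '3.0' and '3' match."""
    counts = defaultdict(int)
    for span in answer_str.split(delimiter):
        try:
            counts[float(span)] += 1
        except ValueError:
            counts[span.strip()] += 1
    return counts


print(_span_counts("3.0, new york") == _span_counts("3, new york"))  # True  -> counted as correct
print(_span_counts("paris, paris") == _span_counts("paris"))         # False -> counted as incorrect
```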
mavonic_private_repos/transformers/examples/research_projects/tapex/run_tabfact_with_tapex.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for tapex on table-based fact verification tasks. Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/text-classification/run_glue.py """ import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np import pandas as pd from datasets import load_dataset import transformers from transformers import ( AutoConfig, BartForSequenceClassification, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, TapexTokenizer, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt") logger = logging.getLogger(__name__) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default="tab_fact", metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}, ) max_seq_length: int = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." 
) }, ) train_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."}) def __post_init__(self): if self.dataset_name is not None: pass elif self.train_file is None or self.validation_file is None: raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.") else: train_extension = self.train_file.split(".")[-1] assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file." validation_extension = self.validation_file.split(".")[-1] assert ( validation_extension == train_extension ), "`validation_file` should have the same extension (csv or json) as `train_file`." @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": data_args.train_file, "validation": data_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if training_args.do_predict: if data_args.test_file is not None: train_extension = data_args.train_file.split(".")[-1] test_extension = data_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." 
data_files["test"] = data_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") for key in data_files.keys(): logger.info(f"load a local file for {key}: {data_files[key]}") if data_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir) else: # Loading a dataset from local json files raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets. # Labels label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) # Load pretrained model and tokenizer # # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # load tapex tokenizer tokenizer = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, add_prefix_space=True, ) model = BartForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # Padding strategy if data_args.pad_to_max_length: padding = "max_length" else: # We will pad later, dynamically at batch creation, to the max sequence length in each batch padding = False # Some models have set the order of the labels to use, so let's make sure we do use it. model.config.label2id = {"Refused": 0, "Entailed": 1} model.config.id2label = {0: "Refused", 1: "Entailed"} if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the " f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}." ) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) def preprocess_tabfact_function(examples): # Tokenize the texts def _convert_table_text_to_pandas(_table_text): """Runs the structured pandas table object for _table_text. 
An example _table_text can be: round#clubs remaining\nfirst round#156\n """ _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")] _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0]) return _table_pd questions = examples["statement"] tables = list(map(_convert_table_text_to_pandas, examples["table_text"])) result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True) result["label"] = examples["label"] return result with training_args.main_process_first(desc="dataset map pre-processing"): raw_datasets = raw_datasets.map( preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", ) if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in raw_datasets and "validation_matched" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) if training_args.do_predict or data_args.test_file is not None: if "test" not in raw_datasets and "test_matched" not in raw_datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = raw_datasets["test"] if data_args.max_predict_samples is not None: predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) # Log a few random samples from the training set: if training_args.do_train: for index in random.sample(range(len(train_dataset)), 3): logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. def compute_metrics(p: EvalPrediction): preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions preds = np.argmax(preds, axis=1) return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding. 
if data_args.pad_to_max_length: data_collator = default_data_collator elif training_args.fp16: data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) else: data_collator = None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.save_model() # Saves the tokenizer too for easy upload trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate(eval_dataset=eval_dataset) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") # Removing the `label` columns because it contains -1 and Trainer won't like that. predict_dataset = predict_dataset.remove_columns("label") predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions predictions = np.argmax(predictions, axis=1) output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt") if trainer.is_world_process_zero(): with open(output_predict_file, "w") as writer: logger.info("***** Predict Results *****") writer.write("index\tprediction\n") for index, item in enumerate(predictions): item = label_list[item] writer.write(f"{index}\t{item}\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"} if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
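The preprocessing in `run_tabfact_with_tapex.py` hinges on `_convert_table_text_to_pandas`: TabFact stores each table as a single `table_text` string whose rows are separated by newlines and whose cells are separated by `#`, with the first row acting as the header. Below is a minimal standalone sketch of that conversion followed by the same kind of `TapexTokenizer` call the script makes; the table string and statement are illustrative, and `microsoft/tapex-base` is the checkpoint named in the accompanying README.

```python
import pandas as pd
from transformers import TapexTokenizer

# TabFact-style flattened table: rows split on "\n", cells split on "#",
# first row is the header (mirrors _convert_table_text_to_pandas above).
table_text = "round#clubs remaining\nfirst round#156\nsecond round#32\n"
rows = [row.split("#") for row in table_text.strip("\n").split("\n")]
table = pd.DataFrame.from_records(rows[1:], columns=rows[0])

# Encode the (table, statement) pair the same way the script does.
tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base", add_prefix_space=True)
statement = "156 clubs remained after the first round"  # illustrative statement
encoding = tokenizer(table, statement, max_length=1024, truncation=True)
print(len(encoding["input_ids"]))
```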
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/tapex/run_wikitablequestions_with_tapex.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The Microsoft and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for tapex on table-based question answering tasks. Adapted from script: https://github.com/huggingface/transformers/blob/master/examples/pytorch/summarization/run_summarization.py """ import logging import os import sys from collections import defaultdict from dataclasses import dataclass, field from functools import partial from typing import List, Optional import nltk # Here to have a nice missing dependency error message early on import numpy as np import pandas as pd from datasets import load_dataset from filelock import FileLock import transformers from transformers import ( AutoConfig, BartForConditionalGeneration, DataCollatorForSeq2Seq, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, TapexTokenizer, set_seed, ) from transformers.file_utils import is_offline_mode from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.17.0.dev0") logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={ "help": ( "Pretrained tokenizer name or path if not the same as model_name. " "By default we use BART-large tokenizer for TAPEX-large." ) }, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default="wikitablequestions", metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=None, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." 
}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table. # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] datasets = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) # IMPORTANT: the initial BART model's decoding is penalized by no_repeat_ngram_size, and thus # we should disable it here to avoid problematic generation config.no_repeat_ngram_size = 0 config.max_length = 1024 config.early_stopping = False # load tapex tokenizer tokenizer = TapexTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, add_prefix_space=True, ) # load Bart based Tapex model (default tapex-large) model = BartForConditionalGeneration.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") # Preprocessing the datasets. # We need to tokenize inputs and targets. if training_args.do_train: column_names = datasets["train"].column_names elif training_args.do_eval: column_names = datasets["validation"].column_names elif training_args.do_predict: column_names = datasets["test"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.") return # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"): logger.warning( "label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for " f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory" ) def preprocess_tableqa_function(examples, is_training=False): """ The is_training FLAG is used to identify if we could use the supervision to truncate the table content if it is required. 
""" questions = [question.lower() for question in examples["question"]] example_tables = examples["table"] tables = [ pd.DataFrame.from_records(example_table["rows"], columns=example_table["header"]) for example_table in example_tables ] # using wikitablequestion's answer set answers = examples["answers"] # IMPORTANT: we cannot pass by answers during evaluation, answers passed during training are used to # truncate large tables in the train set! if is_training: model_inputs = tokenizer( table=tables, query=questions, answer=answers, max_length=data_args.max_source_length, padding=padding, truncation=True, ) else: model_inputs = tokenizer( table=tables, query=questions, max_length=data_args.max_source_length, padding=padding, truncation=True ) labels = tokenizer( answer=[", ".join(answer) for answer in answers], max_length=max_target_length, padding=padding, truncation=True, ) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs # in training, we can use the answer as extra information to truncate large tables preprocess_tableqa_function_training = partial(preprocess_tableqa_function, is_training=True) if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( preprocess_tableqa_function_training, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: max_target_length = data_args.val_max_target_length if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") predict_dataset = datasets["test"] if data_args.max_predict_samples is not None: predict_dataset = predict_dataset.select(range(data_args.max_predict_samples)) predict_dataset = predict_dataset.map( preprocess_tableqa_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=8 if training_args.fp16 else None, ) def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] return preds, labels def compute_metrics(eval_preds): preds, labels = eval_preds if isinstance(preds, tuple): preds = preds[0] decoded_preds = tokenizer.batch_decode(preds, 
skip_special_tokens=True) if data_args.ignore_pad_token_for_loss: # Replace -100 in the labels as we can't decode them. labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) # Some simple post-processing decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) delimiter = ", " # define example evaluation def evaluate_example(predict_str: str, ground_str: str): predict_spans = predict_str.split(delimiter) ground_spans = ground_str.split(delimiter) predict_values = defaultdict(lambda: 0) ground_values = defaultdict(lambda: 0) for span in predict_spans: try: predict_values[float(span)] += 1 except ValueError: predict_values[span.strip()] += 1 for span in ground_spans: try: ground_values[float(span)] += 1 except ValueError: ground_values[span.strip()] += 1 _is_correct = predict_values == ground_values return _is_correct def get_denotation_accuracy(predictions: List[str], references: List[str]): assert len(predictions) == len(references) correct_num = 0 for predict_str, ground_str in zip(predictions, references): is_correct = evaluate_example(predict_str.lower(), ground_str.lower()) if is_correct: correct_num += 1 return correct_num / len(predictions) accuracy = get_denotation_accuracy(decoded_preds, decoded_labels) result = {"denotation_accuracy": accuracy} return result # Initialize our Trainer trainer = Seq2SeqTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics if training_args.predict_with_generate else None, ) if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate( max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, metric_key_prefix="eval" ) max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) if training_args.do_predict: logger.info("*** Predict ***") predict_results = trainer.predict( predict_dataset, metric_key_prefix="predict", max_length=data_args.val_max_target_length, num_beams=data_args.num_beams, ) metrics = predict_results.metrics max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset) ) metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset)) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) if trainer.is_world_process_zero(): if training_args.predict_with_generate: predictions = tokenizer.batch_decode( predict_results.predictions, 
skip_special_tokens=True, clean_up_tokenization_spaces=True ) predictions = [pred.strip() for pred in predictions] output_prediction_file = os.path.join(training_args.output_dir, "tapex_predictions.txt") with open(output_prediction_file, "w") as writer: writer.write("\n".join(predictions)) return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
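The `compute_metrics` function above scores generated answers with denotation accuracy: each prediction and reference is an answer string joined with ", ", every span is parsed as a float when possible, and an example counts as correct when the two resulting multisets of values match. A self-contained sketch of that comparison (the sample strings are made up):

```python
from collections import defaultdict
from typing import Dict, List


def _to_multiset(answer: str) -> Dict:
    """Count answer spans, normalizing numeric spans to floats (mirrors evaluate_example)."""
    counts = defaultdict(int)
    for span in answer.split(", "):
        try:
            counts[float(span)] += 1
        except ValueError:
            counts[span.strip()] += 1
    return counts


def denotation_accuracy(predictions: List[str], references: List[str]) -> float:
    correct = sum(
        _to_multiset(pred.lower()) == _to_multiset(ref.lower())
        for pred, ref in zip(predictions, references)
    )
    return correct / len(predictions)


# "2004, 2008, 2012" matches its reference regardless of span order; "4:27" vs "4:28" does not.
print(denotation_accuracy(["2004, 2008, 2012", "4:27"], ["2012, 2008, 2004", "4:28"]))  # 0.5
```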
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/tapex/requirements.txt
numpy
datasets
pandas
nltk
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/tapex/wikisql_utils.py
# coding=utf-8 # Copyright 2022 The Microsoft, The Google and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import enum import functools import math import re # The following script is adapted from the script of TaPas. # Original: https://github.com/google-research/tapas/master/wikisql_utils.py from typing import Any, List EMPTY_ANSWER = "none" EMPTY_ANSWER_AGG = "none" def _split_thousands(delimiter, value): split = value.split(delimiter) return len(split) > 1 and any((len(x) == 3 for x in split)) def convert_to_float(value): """Converts value to a float using a series of increasingly complex heuristics. Args: value: object that needs to be converted. Allowed types include float/int/strings. Returns: A float interpretation of value. Raises: ValueError if the float conversion of value fails. """ if isinstance(value, float): return value if isinstance(value, int): return float(value) if not isinstance(value, str): raise ValueError("Argument value is not a string. Can't parse it as float") sanitized = value try: # Example: 1,000.7 if "." in sanitized and "," in sanitized: return float(sanitized.replace(",", "")) # 1,000 if "," in sanitized and _split_thousands(",", sanitized): return float(sanitized.replace(",", "")) # 5,5556 if "," in sanitized and sanitized.count(",") == 1 and not _split_thousands(",", sanitized): return float(sanitized.replace(",", ".")) # 0.0.0.1 if sanitized.count(".") > 1: return float(sanitized.replace(".", "")) # 0,0,0,1 if sanitized.count(",") > 1: return float(sanitized.replace(",", "")) return float(sanitized) except ValueError: # Avoid adding the sanitized value in the error message. raise ValueError("Unable to convert value to float") def _normalize_float(answer): if answer is None: return None try: value = convert_to_float(answer) if isinstance(value, float) and math.isnan(value): return None return value except ValueError: return answer.lower() _TYPE_CONVERTER = { "text": lambda x: x, "real": convert_to_float, } class _Aggregation(enum.Enum): """Aggregations as defined by WikiSQL. Indexes match the data.""" NONE = 0 MAX = 1 MIN = 2 COUNT = 3 SUM = 4 AVERAGE = 5 class _Operator(enum.Enum): """The boolean operators used by WikiSQL. 
Indexes match the data.""" EQUALS = 0 GREATER = 1 LESSER = 2 @dataclasses.dataclass class _Condition: """Represents an SQL where clauses (e.g A = "a" or B > 5).""" column: str operator: _Operator cmp_value: Any _TOKENIZER = re.compile(r"\w+|[^\w\s]+", re.UNICODE | re.MULTILINE | re.DOTALL) def _normalize_for_match(x): return list(_TOKENIZER.findall(x.lower())) def _compare(operator, src, tgt): if operator == _Operator.EQUALS: return src == tgt elif operator == _Operator.GREATER: return src > tgt elif operator == _Operator.LESSER: return src < tgt raise ValueError(f"Unknown operator: {operator}") def _parse_value(table, column, cell_value): """Convert numeric values to floats and keeps everything else as string.""" types = table["types"] return _TYPE_CONVERTER[types[column]](cell_value) def _is_string(x): return isinstance(x, str) def _respect_conditions(table, row, conditions): """True if 'row' satisfies all 'conditions'.""" for cond in conditions: table_value = row[cond.column] cmp_value = _parse_value(table, cond.column, cond.cmp_value) if _is_string(table_value) and _is_string(cmp_value): table_value = _normalize_for_match(table_value) cmp_value = _normalize_for_match(cmp_value) if not isinstance(table_value, type(cmp_value)): raise ValueError("Type difference {} != {}".format(type(table_value), type(cmp_value))) if not _compare(cond.operator, table_value, cmp_value): return False return True def _get_float_answer(table, answer_coordinates, aggregation_op): """Applies operation to produce reference float answer.""" if not answer_coordinates: if aggregation_op == _Aggregation.COUNT: return 0.0 else: return EMPTY_ANSWER_AGG # Count can support non numeric answers. if aggregation_op == _Aggregation.COUNT: return float(len(answer_coordinates)) # If we have just one answer, if float returns it or try a conversion. values = [table["rows"][i][j] for (i, j) in answer_coordinates] if len(answer_coordinates) == 1: try: return convert_to_float(values[0]) except ValueError as e: if aggregation_op != _Aggregation.NONE: raise e if aggregation_op == _Aggregation.NONE: return None # Other aggregation only support numeric values. Bail out if we have strings. if not all((isinstance(v, (int, float)) for v in values)): return None if aggregation_op == _Aggregation.SUM: return float(sum(values)) elif aggregation_op == _Aggregation.AVERAGE: return sum(values) / len(answer_coordinates) else: raise ValueError(f"Unknown aggregation: {aggregation_op}") def _get_answer_coordinates(table, sql_query): """Retrieves references coordinates by executing SQL.""" # MAX and MIN are automatically supported by the model. aggregation_op_index = sql_query["agg"] if aggregation_op_index >= 3: aggregation_op = _Aggregation(aggregation_op_index) else: aggregation_op = _Aggregation.NONE target_column = sql_query["sel"] conditions = [ _Condition(column, _Operator(operator), cmp_value) for column, operator, cmp_value in zip( sql_query["conds"]["column_index"], sql_query["conds"]["operator_index"], sql_query["conds"]["condition"] ) ] indices = [] for row in range(len(table["rows"])): if _respect_conditions(table, table["rows"][row], conditions): indices.append((row, target_column)) if not indices: return [], aggregation_op if len(indices) == 1: return indices, aggregation_op # Parsing of MIN/MAX. 
if aggregation_op_index in (1, 2): operators = {2: min, 1: max} values = [(table["rows"][i][j], index) for index, (i, j) in enumerate(indices)] reduced = functools.reduce(operators[sql_query["agg"]], values) ret = [indices[reduced[1]]] return ret, _Aggregation.NONE return indices, aggregation_op def _get_answer_text(table, answer_coordinates, float_answer): if float_answer is not None: return [str(float_answer)] return [str(table["real_rows"][r][c]) for r, c in answer_coordinates] def retrieve_wikisql_query_answer_tapas(table, example) -> List: answer_coordinates, aggregation_op = _get_answer_coordinates(table, example) float_answer = _get_float_answer(table, answer_coordinates, aggregation_op) answer_text = _get_answer_text(table, answer_coordinates, float_answer) # keep the original data the same with TaPas if len(answer_text) == 0: answer_text = [EMPTY_ANSWER] return answer_text
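`retrieve_wikisql_query_answer_tapas` is the entry point used to execute a WikiSQL query annotation against its table and recover the reference answer text. The sketch below shows one hedged invocation; the dictionary layout (a `types`/`rows`/`real_rows` table and a `sel`/`agg`/`conds` query) is inferred from the field accesses in this file, so treat it as illustrative rather than the canonical WikiSQL schema.

```python
from wikisql_utils import retrieve_wikisql_query_answer_tapas

# Table with numeric cells already converted to float in `rows`; `real_rows`
# keeps the original surface strings used for the answer text.
table = {
    "header": ["player", "score"],
    "types": ["text", "real"],
    "rows": [["john", 10.0], ["mary", 20.0]],
    "real_rows": [["John", "10"], ["Mary", "20"]],
}

# SELECT player WHERE score > 15  ->  sel=0 (player), agg=0 (NONE),
# one condition on column 1 with operator GREATER and comparison value "15".
query = {
    "sel": 0,
    "agg": 0,
    "conds": {"column_index": [1], "operator_index": [1], "condition": ["15"]},
}

print(retrieve_wikisql_query_answer_tapas(table, query))  # ["Mary"]
```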
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/tapex/README.md
<!--- Copyright 2022 The Microsoft Inc. and The HuggingFace Inc. Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Run Table Tasks with TAPEX TAPEX is a table pre-training approach for table-related tasks. By learning a neural SQL executor over a synthetic corpus based on generative language models (e.g., BART), it achieves state-of-the-art performance on several table-based question answering benchmarks and table-based fact verification benchmark. More details can be found in the original paper [TAPEX: Table Pre-training via Learning a Neural SQL Executor](https://arxiv.org/pdf/2107.07653.pdf). > If you are also familiar with [fairseq](https://github.com/pytorch/fairseq), you may also find [the official implementation](https://github.com/microsoft/Table-Pretraining) useful, which leverages the framework. ## Table Question Answering Tasks ### What is Table Question Answering ![Example](https://table-pretraining.github.io/assets/tableqa_task.png) The task of Table Question Answering (TableQA) is to empower machines to answer users' questions over a given table. The resulting answer(s) can be a region in the table, or a number calculated by applying aggregation operators to a specific region. ### What Questions Can be Answered Benefiting from the powerfulness of generative models, TAPEX can deal with almost all kinds of questions over tables (if there is training data). Below are some typical question and their answers taken from [WikiTableQuestion](https://nlp.stanford.edu/blog/wikitablequestions-a-complex-real-world-question-understanding-dataset). | Question | Answer | | :---: | :---: | | What is the years won for each team? | 2004, 2008, 2012 | | How long did Taiki Tsuchiya last? | 4:27 | | What is the total amount of matches drawn? | 1 | | Besides Tiger Woods, what other player won between 2007 and 2009? | Camilo Villegas | | What was the last Baekje Temple? | Uija | | What is the difference between White voters and Black voters in 1948? | 0 | | What is the average number of sailors for each country during the worlds qualification tournament? | 2 | ### How to Fine-tune TAPEX on TableQA We provide a fine-tuning script of tapex for TableQA on the WikiSQL benchmark: [WikiSQL](https://github.com/salesforce/WikiSQL). This script is customized for tapex models, and can be easily adapted to other benchmarks such as WikiTableQuestion (only some tweaks in the function `preprocess_tableqa_function`). #### TAPEX-Base on WikiSQL Here is how to run the script on the WikiSQL with `tapex-base`: > The default hyper-parameter may allow you to reproduce our reported tapex-base results within the memory budget of 16GB and 1 GPU card. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. 
```bash export EXP_NAME=wikisql_tapex_base python run_wikisql_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-base \ --overwrite_output_dir \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 8 \ --per_device_eval_batch_size 4 \ --learning_rate 3e-5 \ --logging_steps 10 \ --eval_steps 1000 \ --save_steps 1000 \ --warmup_steps 1000 \ --eval_strategy steps \ --predict_with_generate \ --num_beams 5 \ --weight_decay 1e-2 \ --label_smoothing_factor 0.1 \ --max_steps 20000 ``` #### TAPEX-Large on WikiSQL Here is how to run the script on the WikiSQL with `tapex-large`: > The default hyper-parameter may allow you to reproduce our reported tapex-large results within the memory budget of 16GB and 1 GPU card with fp16. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. If you do not install apex or other mixed-precision-training libs, you could disable the `predict_with_generate` option to save GPU memory and manually evaluate the model once the fine-tuning finished. Or just pick up the last checkpoint, which usually performs good enough on the dataset. ```bash export EXP_NAME=wikisql_tapex_large python run_wikisql_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-large \ --overwrite_output_dir \ --per_device_train_batch_size 1 \ --gradient_accumulation_steps 32 \ --per_device_eval_batch_size 4 \ --learning_rate 3e-5 \ --logging_steps 10 \ --eval_steps 1000 \ --save_steps 1000 \ --warmup_steps 1000 \ --eval_strategy steps \ --predict_with_generate \ --num_beams 5 \ --weight_decay 1e-2 \ --label_smoothing_factor 0.1 \ --max_steps 20000 \ --fp16 ``` #### TAPEX-Base on WikiTableQuestions Here is how to run the script on the WikiTableQuestions with `tapex-base`: > The default hyper-parameter may allow you to reproduce our reported tapex-base results within the memory budget of 16GB and 1 GPU card. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. ```bash export EXP_NAME=wikitablequestions_tapex_base python run_wikitablequestions_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-base \ --overwrite_output_dir \ --per_device_train_batch_size 4 \ --gradient_accumulation_steps 8 \ --per_device_eval_batch_size 4 \ --learning_rate 3e-5 \ --logging_steps 10 \ --eval_steps 1000 \ --save_steps 1000 \ --warmup_steps 1000 \ --eval_strategy steps \ --predict_with_generate \ --num_beams 5 \ --weight_decay 1e-2 \ --label_smoothing_factor 0.1 \ --max_steps 20000 ``` #### TAPEX-Large on WikiTableQuestions Here is how to run the script on the WikiTableQuestions with `tapex-large`: > The default hyper-parameter may allow you to reproduce our reported tapex-large results within the memory budget of 16GB and 1 GPU card with fp16. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. If you do not install apex or other mixed-precision-training libs, you could reduce the `per_device_train_batch_size` and `per_device_eval_batch_size` and have another try. Or you could disable the `predict_with_generate` option to save GPU memory and manually evaluate the model once the fine-tuning finished. Or just pick up the last checkpoint, which usually performs good enough on the dataset. 
```bash export EXP_NAME=wikitablequestions_tapex_large python run_wikitablequestions_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-large \ --overwrite_output_dir \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 12 \ --per_device_eval_batch_size 4 \ --learning_rate 3e-5 \ --logging_steps 10 \ --eval_steps 1000 \ --save_steps 1000 \ --warmup_steps 1000 \ --eval_strategy steps \ --predict_with_generate \ --num_beams 5 \ --weight_decay 1e-2 \ --label_smoothing_factor 0.1 \ --max_steps 20000 \ --fp16 ``` ### How to Evaluate TAPEX Fine-tuned Models on TableQA We provide fine-tuned model weights to reproduce our results. You can evaluate them using the following command: > You can also replace `microsoft/tapex-base-finetuned-wikisql` with your local directory to evaluate your fine-tuned models. Notice that if the model has a larger size, you should reduce `per_device_eval_batch_size` to fit the memory requirement. ```bash export EXP_NAME=wikisql_tapex_base_eval python run_wikisql_with_tapex.py \ --do_eval \ --model_name_or_path microsoft/tapex-base-finetuned-wikisql \ --output_dir $EXP_NAME \ --per_device_eval_batch_size 4 \ --predict_with_generate \ --num_beams 5 ``` ## Table Fact Verification Tasks ### What is Table Fact Verification ![Example](https://table-pretraining.github.io/assets/tableft_task.png) The task of Table Fact Verification (TableFV) is to empower machines to justify if a statement follows facts in a given table. The result is a binary classification belonging to `1` (entailed) or `0` (refused). ### How to Fine-tune TAPEX on TableFV #### TAPEX-Base on TabFact We provide a fine-tuning script of tapex for TableFV on the TabFact benchmark: [TabFact](https://github.com/wenhuchen/Table-Fact-Checking). Here is how to run the script on the TabFact: > The default hyper-parameter may allow you to reproduce our reported tapex-base results within the memory budget of 16GB and 1 GPU card. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. Note that the `eval_accumulation_steps` is necessary, otherwise GPU memory leaks will occur during the evaluation. ```bash export EXP_NAME=tabfact_tapex_base python run_tabfact_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-base \ --overwrite_output_dir \ --per_device_train_batch_size 3 \ --gradient_accumulation_steps 16 \ --per_device_eval_batch_size 12 \ --eval_accumulation_steps 6 \ --warm_steps 1000 \ --logging_steps 10 \ --learning_rate 3e-5 \ --eval_steps 1000 \ --save_steps 1000 \ --eval_strategy steps \ --weight_decay 1e-2 \ --max_steps 30000 \ --max_grad_norm 0.1 ``` #### TAPEX-Large on TabFact Here is how to run the script on the TabFact: > The default hyper-parameter may allow you to reproduce our reported tapex-base results within the memory budget of 24GB and 1 GPU card. Sorry we cannot reduce the memory consumption since the model input in TabFact usually contains nearly ~1000 tokens. If you have more GPU cards, you could reduce `gradient_accumulation_steps` accordingly. Note that the `eval_accumulation_steps` is necessary, otherwise GPU memory leaks will occur during the evaluation. 
```bash export EXP_NAME=tabfact_tapex_large python run_tabfact_with_tapex.py \ --do_train \ --do_eval \ --output_dir $EXP_NAME \ --model_name_or_path microsoft/tapex-large \ --overwrite_output_dir \ --per_device_train_batch_size 2 \ --gradient_accumulation_steps 18 \ --per_device_eval_batch_size 4 \ --eval_accumulation_steps 12 \ --warm_steps 1000 \ --logging_steps 10 \ --learning_rate 3e-5 \ --eval_steps 1000 \ --save_steps 1000 \ --eval_strategy steps \ --weight_decay 1e-2 \ --max_steps 30000 \ --max_grad_norm 0.1 ``` ### How to Evaluate TAPEX Fine-tuned Models on TableFV We provide fine-tuned model weights to reproduce our results. You can evaluate them using the following command: > You can also replace `microsoft/tapex-base-finetuned-tabfact` with your local directory to evaluate your fine-tuned models. Notice that if the model has a larger size, you should reduce `per_device_eval_batch_size` to fit the memory requirement. ```bash export EXP_NAME=tabfact_tapex_base_eval python run_tabfact_with_tapex.py \ --do_eval \ --model_name_or_path microsoft/tapex-base-finetuned-tabfact \ --output_dir $EXP_NAME \ --per_device_eval_batch_size 12 \ --eval_accumulation_steps 6 ``` ## Reproduced Results We get the following results on the dev set of the benchmark with the previous commands: | Task | Model Size | Metric | Result | |:---:|:---:|:---:|:---:| | WikiSQL (Weak) | Base | Denotation Accuracy | 88.1 | | WikiSQL (Weak) | Large | Denotation Accuracy | 89.5 | | WikiTableQuestion | Base | Denotation Accuracy | 47.1 | | WikiTableQuestion | Large | Denotation Accuracy | 57.2 | | TabFact | Base | Accuracy | 78.7 | | TabFact | Large | Accuracy | 83.6 |
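The released checkpoints referenced above can also be queried directly, without the training scripts, using `TapexTokenizer` together with the BART classes the scripts fine-tune. A minimal inference sketch against `microsoft/tapex-base-finetuned-wikisql` (the table and question below are illustrative):

```python
import pandas as pd
from transformers import BartForConditionalGeneration, TapexTokenizer

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base-finetuned-wikisql")
model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base-finetuned-wikisql")

# Illustrative table and question.
data = {
    "year": [1896, 2008, 2012],
    "city": ["athens", "beijing", "london"],
}
table = pd.DataFrame.from_dict(data)
query = "in which year did beijing host the olympic games?"

encoding = tokenizer(table=table, query=query, return_tensors="pt")
outputs = model.generate(**encoding)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))  # expected to print the year, e.g. [' 2008']
```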
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/self-training-text-classification/finetuning.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning the library models for sequence classification.""" import argparse import dataclasses import json import logging import math import os import random import shutil from typing import List, Optional import datasets import numpy as np import pandas as pd import torch from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformers import ( AdamW, AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, DataCollatorWithPadding, default_data_collator, get_scheduler, set_seed, ) from transformers.file_utils import ExplicitEnum from transformers.trainer_utils import IntervalStrategy logger = logging.getLogger(__name__) class Split(ExplicitEnum): TRAIN = "train" EVAL = "eval" TEST = "test" INFER = "infer" @dataclasses.dataclass class FTModelArguments: """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" model_name_or_path: str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) use_fast_tokenizer: Optional[bool] = dataclasses.field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) cache_dir: Optional[str] = dataclasses.field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, ) @dataclasses.dataclass class FTDataArguments: """Arguments pertaining to what data we are going to input our model for training and evaluation.""" train_file: str = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the training data."} ) eval_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) test_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the test data."} ) infer_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the data to predict on."} ) task_name: Optional[str] = dataclasses.field( default=None, metadata={"help": "The name of the task to train on."}, ) label_list: Optional[List[str]] = dataclasses.field( default=None, metadata={"help": "The list of labels for the task."} ) max_length: Optional[int] = dataclasses.field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) pad_to_max_length: Optional[bool] = dataclasses.field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." 
) }, ) @dataclasses.dataclass class FTTrainingArguments: """Training arguments pertaining to the training loop itself.""" output_dir: str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) do_train: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to run training or not."}, ) do_eval: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to run evaluation on the validation set or not."}, ) do_predict: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to run inference on the inference set or not."}, ) seed: Optional[int] = dataclasses.field( default=42, metadata={"help": "Random seed that will be set at the beginning of training."}, ) per_device_train_batch_size: Optional[int] = dataclasses.field( default=8, metadata={"help": "The batch size per GPU/TPU core/CPU for training."}, ) per_device_eval_batch_size: Optional[int] = dataclasses.field( default=8, metadata={"help": "The batch size per GPU/TPU core/CPU for evaluation."}, ) weight_decay: Optional[float] = dataclasses.field( default=0.0, metadata={ "help": ( "The weight decay to apply (if not zero) to all layers except all bias and LayerNorm weights in" " [`AdamW`] optimizer." ) }, ) learning_rate: Optional[float] = dataclasses.field( default=5e-5, metadata={"help": "The initial learning rate for [`AdamW`] optimizer."}, ) gradient_accumulation_steps: Optional[int] = dataclasses.field( default=1, metadata={ "help": ( "Number of updates steps to accumulate the gradients for, before performing a backward/update pass." ) }, ) max_steps: Optional[int] = dataclasses.field( default=-1, metadata={ "help": ( "If set to a positive number, the total number of training steps to perform. Overrides" " `num_train_epochs`." ) }, ) lr_scheduler_type: Optional[str] = dataclasses.field( default="linear", metadata={"help": "The scheduler type to use."} ) warmup_steps: Optional[int] = dataclasses.field( default=1, metadata={ "help": ( "Number of steps used for a linear warmup from 0 to `learning_rate`. Overrides any effect of" " `warmup_ratio`." ) }, ) eval_strategy: Optional[str] = dataclasses.field( default="no", metadata={ "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' }, ) eval_steps: Optional[int] = dataclasses.field( default=1, metadata={"help": 'Number of update steps between two evaluations if `eval_strategy="steps"`.'}, ) eval_metric: Optional[str] = dataclasses.field( default="accuracy", metadata={"help": "The evaluation metric used for the task."} ) keep_checkpoint_max: Optional[int] = dataclasses.field( default=1, metadata={"help": "The maximum number of best checkpoint files to keep."}, ) early_stopping_patience: Optional[int] = dataclasses.field( default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) early_stopping_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
}, ) def train(args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader=None): """Train a model on the given training data.""" total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(" Num examples = %d", args.num_examples[Split.TRAIN.value]) logger.info(" Instantaneous batch size per device = %d", args.per_device_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_batch_size) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", args.max_steps) # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_steps), disable=not accelerator.is_local_main_process) checkpoints = None eval_results = None best_checkpoint = None best_eval_result = None early_stopping_patience_counter = 0 should_training_stop = False epoch = 0 completed_steps = 0 train_loss = 0.0 model.zero_grad() for _ in range(args.num_train_epochs): epoch += 1 model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss loss = loss / args.gradient_accumulation_steps accelerator.backward(loss) train_loss += loss.item() if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1: optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) completed_steps += 1 # Evaluate during training if ( eval_dataloader is not None and args.eval_strategy == IntervalStrategy.STEPS.value and args.eval_steps > 0 and completed_steps % args.eval_steps == 0 ): accelerator.wait_for_everyone() new_checkpoint = f"checkpoint-{IntervalStrategy.STEPS.value}-{completed_steps}" new_eval_result = evaluate(args, accelerator, eval_dataloader, "eval", model, new_checkpoint)[ args.eval_metric ] logger.info( "Evaluation result at step %d: %s = %f", completed_steps, args.eval_metric, new_eval_result ) if checkpoints is None: checkpoints = np.array([new_checkpoint]) eval_results = np.array([new_eval_result]) best_checkpoint = new_checkpoint best_eval_result = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: best_checkpoint = new_checkpoint best_eval_result = new_eval_result early_stopping_patience_counter = 0 else: if new_eval_result == best_eval_result: best_checkpoint = new_checkpoint best_eval_result = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: should_training_stop = True checkpoints = np.append(checkpoints, [new_checkpoint], axis=0) eval_results = np.append(eval_results, [new_eval_result], axis=0) sorted_ids = np.argsort(eval_results) eval_results = eval_results[sorted_ids] checkpoints = checkpoints[sorted_ids] if len(checkpoints) > args.keep_checkpoint_max: # Delete the current worst checkpoint checkpoint_to_remove, *checkpoints = checkpoints eval_results = eval_results[1:] if checkpoint_to_remove != new_checkpoint: if accelerator.is_main_process: shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True) accelerator.wait_for_everyone() if new_checkpoint in checkpoints: # Save model checkpoint checkpoint_output_dir = os.path.join(args.output_dir, new_checkpoint) if accelerator.is_main_process: if not os.path.exists(checkpoint_output_dir): os.makedirs(checkpoint_output_dir) accelerator.wait_for_everyone() 
unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(checkpoint_output_dir) logger.info("Saving model checkpoint to %s", checkpoint_output_dir) if completed_steps >= args.max_steps: break if should_training_stop: break # Evaluate during training if eval_dataloader is not None and args.eval_strategy == IntervalStrategy.EPOCH.value: accelerator.wait_for_everyone() new_checkpoint = f"checkpoint-{IntervalStrategy.EPOCH.value}-{epoch}" new_eval_result = evaluate(args, accelerator, eval_dataloader, "eval", model, new_checkpoint)[ args.eval_metric ] logger.info("Evaluation result at epoch %d: %s = %f", epoch, args.eval_metric, new_eval_result) if checkpoints is None: checkpoints = np.array([new_checkpoint]) eval_results = np.array([new_eval_result]) best_checkpoint = new_checkpoint best_eval_result = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: best_checkpoint = new_checkpoint best_eval_result = new_eval_result early_stopping_patience_counter = 0 else: if new_eval_result == best_eval_result: best_checkpoint = new_checkpoint best_eval_result = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: should_training_stop = True checkpoints = np.append(checkpoints, [new_checkpoint], axis=0) eval_results = np.append(eval_results, [new_eval_result], axis=0) sorted_ids = np.argsort(eval_results) eval_results = eval_results[sorted_ids] checkpoints = checkpoints[sorted_ids] if len(checkpoints) > args.keep_checkpoint_max: # Delete the current worst checkpoint checkpoint_to_remove, *checkpoints = checkpoints eval_results = eval_results[1:] if checkpoint_to_remove != new_checkpoint: if accelerator.is_main_process: shutil.rmtree(os.path.join(args.output_dir, checkpoint_to_remove), ignore_errors=True) accelerator.wait_for_everyone() if new_checkpoint in checkpoints: # Save model checkpoint checkpoint_output_dir = os.path.join(args.output_dir, new_checkpoint) if accelerator.is_main_process: if not os.path.exists(checkpoint_output_dir): os.makedirs(checkpoint_output_dir) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) if accelerator.is_main_process: tokenizer.save_pretrained(checkpoint_output_dir) logger.info("Saving model checkpoint to %s", checkpoint_output_dir) if completed_steps >= args.max_steps: break if should_training_stop: break if best_checkpoint is not None: # Save the best checkpoint logger.info("Best checkpoint: %s", best_checkpoint) logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) best_checkpoint_output_dir = os.path.join(args.output_dir, best_checkpoint) if accelerator.is_main_process: shutil.move(best_checkpoint_output_dir, os.path.join(args.output_dir, "best-checkpoint")) shutil.rmtree(best_checkpoint_output_dir, ignore_errors=True) accelerator.wait_for_everyone() else: # Assume that the last checkpoint is the best checkpoint and save it checkpoint_output_dir = os.path.join(args.output_dir, "best-checkpoint") if not os.path.exists(checkpoint_output_dir): os.makedirs(checkpoint_output_dir) accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained(checkpoint_output_dir, save_function=accelerator.save) if accelerator.is_main_process: 
tokenizer.save_pretrained(checkpoint_output_dir) logger.info("Saving model checkpoint to %s", checkpoint_output_dir) return completed_steps, train_loss / completed_steps def evaluate(args, accelerator, dataloader, eval_set, model, checkpoint, has_labels=True, write_to_file=True): """Evaluate a model checkpoint on the given evaluation data.""" num_examples = args.num_examples[eval_set] eval_metric = None completed_steps = 0 eval_loss = 0.0 all_predictions = None all_references = None all_probabilities = None if has_labels: # Get the metric function eval_metric = load_metric(args.eval_metric) eval_results = {} model.eval() for _, batch in enumerate(dataloader): with torch.no_grad(): outputs = model(**batch) eval_loss += outputs.loss.item() logits = outputs.logits predictions = logits.argmax(dim=-1) if not args.is_regression else logits.squeeze() predictions = accelerator.gather(predictions) if all_predictions is None: all_predictions = predictions.detach().cpu().numpy() else: all_predictions = np.append(all_predictions, predictions.detach().cpu().numpy(), axis=0) if not args.is_regression: probabilities = logits.softmax(dim=-1).max(dim=-1).values probabilities = accelerator.gather(probabilities) if all_probabilities is None: all_probabilities = probabilities.detach().cpu().numpy() else: all_probabilities = np.append(all_probabilities, probabilities.detach().cpu().numpy(), axis=0) if has_labels: references = batch["labels"] references = accelerator.gather(references) if all_references is None: all_references = references.detach().cpu().numpy() else: all_references = np.append(all_references, references.detach().cpu().numpy(), axis=0) eval_metric.add_batch( predictions=predictions, references=references, ) completed_steps += 1 if has_labels: eval_results.update(eval_metric.compute()) eval_results["completed_steps"] = completed_steps eval_results["avg_eval_loss"] = eval_loss / completed_steps if write_to_file: accelerator.wait_for_everyone() if accelerator.is_main_process: results_file = os.path.join(args.output_dir, f"{eval_set}_results_{checkpoint}.json") with open(results_file, "w") as f: json.dump(eval_results, f, indent=4, sort_keys=True) if write_to_file: accelerator.wait_for_everyone() if accelerator.is_main_process: output_file = os.path.join(args.output_dir, f"{eval_set}_output_{checkpoint}.csv") if not args.is_regression: assert len(all_predictions) == len(all_probabilities) df = pd.DataFrame(list(zip(all_predictions, all_probabilities)), columns=["prediction", "probability"]) else: df = pd.DataFrame(all_predictions, columns=["prediction"]) df = df.head(num_examples) df.to_csv(output_file, header=True, index=False) return eval_results def load_from_pretrained(args, pretrained_model_name_or_path): """Load the pretrained model and tokenizer.""" # In distributed training, the .from_pretrained methods guarantee that only # one local process can concurrently perform this procedure. 
config = AutoConfig.from_pretrained( pretrained_model_name_or_path, num_labels=args.num_labels if hasattr(args, "num_labels") else None, finetuning_task=args.task_name.lower(), cache_dir=args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( pretrained_model_name_or_path, use_fast=args.use_fast_tokenizer, cache_dir=args.cache_dir ) model = AutoModelForSequenceClassification.from_pretrained( pretrained_model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, ignore_mismatched_sizes=True, cache_dir=args.cache_dir, ) return config, tokenizer, model def finetune(accelerator, model_name_or_path, train_file, output_dir, **kwargs): """Fine-tuning a pre-trained model on a downstream task. Args: accelerator: An instance of an accelerator for distributed training (on multi-GPU, TPU) or mixed precision training. model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. """ # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) model_args = FTModelArguments(model_name_or_path=model_name_or_path) data_args = FTDataArguments(train_file=train_file) training_args = FTTrainingArguments(output_dir=output_dir) args = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(arg_class).items(): setattr(args, key, value) for key, value in kwargs.items(): if hasattr(args, key): setattr(args, key, value) # Sanity checks data_files = {} args.data_file_extension = None # You need to provide the training data as we always run training args.do_train = True assert args.train_file is not None data_files[Split.TRAIN.value] = args.train_file if args.do_eval or args.eval_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None data_files[Split.EVAL.value] = args.eval_file if args.do_eval and args.test_file is not None: data_files[Split.TEST.value] = args.test_file if args.do_predict: assert args.infer_file is not None data_files[Split.INFER.value] = args.infer_file for key in data_files: extension = data_files[key].split(".")[-1] assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: args.data_file_extension = extension else: assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # Handle the output directory creation if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # If passed along, set the training seed now. 
if args.seed is not None: set_seed(args.seed) # You need to provide your CSV/JSON data files. # # For CSV/JSON files, this script will use as labels the column called 'label' # and as pair of sentences the sentences in columns called 'sentence1' and # 'sentence2' if these columns exist or the first two columns not named # 'label' if at least two columns are provided. # # If the CSVs/JSONs contain only one non-label column, the script does single # sentence classification on this single column. # # In distributed training, the load_dataset function guarantees that only one # local process can download the dataset. # Loading the dataset from local csv or json files. raw_datasets = load_dataset(args.data_file_extension, data_files=data_files) # Labels is_regression = raw_datasets[Split.TRAIN.value].features["label"].dtype in ["float32", "float64"] args.is_regression = is_regression if args.is_regression: label_list = None num_labels = 1 else: label_list = args.label_list assert label_list is not None label_list.sort() # Let's sort it for determinism num_labels = len(label_list) args.num_labels = num_labels # Load pre-trained model config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) # Preprocessing the datasets non_label_column_names = [name for name in raw_datasets[Split.TRAIN.value].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None label_to_id = {v: i for i, v in enumerate(label_list)} config.label2id = label_to_id config.id2label = {id: label for label, id in config.label2id.items()} padding = "max_length" if args.pad_to_max_length else False def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding=padding, max_length=args.max_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will # expect that. 
result["labels"] = examples["label"] return result with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets[Split.TRAIN.value].column_names, desc="Running tokenizer on dataset", ) num_examples = {} splits = [s.value for s in Split] for split in splits: if split in processed_datasets: num_examples[split] = len(processed_datasets[split]) args.num_examples = num_examples train_dataset = processed_datasets[Split.TRAIN.value] eval_dataset = processed_datasets[Split.EVAL.value] if Split.EVAL.value in processed_datasets else None test_dataset = processed_datasets[Split.TEST.value] if Split.TEST.value in processed_datasets else None infer_dataset = processed_datasets[Split.INFER.value] if Split.INFER.value in processed_datasets else None # Log a few random samples from the training set: for index in random.sample(range(len(train_dataset)), 3): logger.info("Sample %d of the training set: %s.", index, train_dataset[index]) # DataLoaders creation: if args.pad_to_max_length: # If padding was already done ot max length, we use the default data # collator that will just convert everything to tensors. data_collator = default_data_collator else: # Otherwise, `DataCollatorWithPadding` will apply dynamic padding for us (by # padding to the maximum length of the samples passed). When using mixed # precision, we add `pad_to_multiple_of=8` to pad all tensors to multiple of # 8s, which will enable the use of Tensor Cores on NVIDIA hardware with # compute capability >= 7.5 (Volta). data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=(8 if accelerator.use_fp16 else None)) train_dataloader = DataLoader( train_dataset, batch_size=args.per_device_train_batch_size, shuffle=True, collate_fn=data_collator, ) eval_dataloader, test_dataloader, infer_dataloader = None, None, None if eval_dataset is not None: eval_dataloader = DataLoader( eval_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) if test_dataset is not None: test_dataloader = DataLoader( test_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) if infer_dataset is not None: infer_dataloader = DataLoader( infer_dataset, batch_size=args.per_device_eval_batch_size, collate_fn=data_collator ) # Optimizer # Split weights in two groups, one with weight decay and the other not. no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, test_dataloader, infer_dataloader ) # Note -> the training dataloader needs to be prepared before we grab its # length below (cause its length will be shorter in multiprocess) # Scheduler and math around the number of training steps. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_steps == -1: args.max_steps = args.num_train_epochs * num_update_steps_per_epoch else: args.num_train_epochs = math.ceil(args.max_steps / num_update_steps_per_epoch) lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=args.max_steps, ) # Train completed_steps, avg_train_loss = train( args, accelerator, model, tokenizer, train_dataloader, optimizer, lr_scheduler, eval_dataloader ) accelerator.wait_for_everyone() logger.info("Training job completed: completed_steps = %d, avg_train_loss = %f", completed_steps, avg_train_loss) args.model_name_or_path = os.path.join(args.output_dir, "best-checkpoint") logger.info("Loading the best checkpoint: %s", args.model_name_or_path) config, tokenizer, model = load_from_pretrained(args, args.model_name_or_path) model = accelerator.prepare(model) if args.do_eval: # Evaluate if eval_dataloader is not None: logger.info("***** Running evaluation on the eval data using the best checkpoint *****") eval_results = evaluate(args, accelerator, eval_dataloader, Split.EVAL.value, model, "best-checkpoint") avg_eval_loss = eval_results["avg_eval_loss"] eval_metric = eval_results[args.eval_metric] logger.info("Evaluation job completed: avg_eval_loss = %f", avg_eval_loss) logger.info("Evaluation result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) if test_dataloader is not None: logger.info("***** Running evaluation on the test data using the best checkpoint *****") eval_results = evaluate(args, accelerator, test_dataloader, Split.TEST.value, model, "best-checkpoint") avg_eval_loss = eval_results["avg_eval_loss"] eval_metric = eval_results[args.eval_metric] logger.info("Test job completed: avg_test_loss = %f", avg_eval_loss) logger.info("Test result for the best checkpoint: %s = %f", args.eval_metric, eval_metric) if args.do_predict: # Predict if infer_dataloader is not None: logger.info("***** Running inference using the best checkpoint *****") evaluate( args, accelerator, infer_dataloader, Split.INFER.value, model, "best-checkpoint", has_labels=False ) logger.info("Inference job completed.") # Release all references to the internal objects stored and call the garbage # collector. You should call this method between two trainings with different # models/optimizers. accelerator.free_memory()
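For reference, a minimal sketch (not part of the original script) of how the `finetune` entry point defined above can be driven directly, similar to how `selftraining.py` invokes it; the `Accelerator`, file paths, task name, and hyperparameter values below are placeholders:

```python
# Hypothetical direct invocation of finetune(); paths, task name, and values are placeholders.
from accelerate import Accelerator

from finetuning import finetune

accelerator = Accelerator()
finetune(
    accelerator=accelerator,
    model_name_or_path="bert-base-uncased",
    train_file="/path/to/train.csv",
    output_dir="/path/to/output",
    eval_file="/path/to/eval.csv",      # needed when do_eval=True or eval_strategy != "no"
    do_eval=True,
    eval_strategy="steps",
    eval_steps=100,
    max_steps=1000,
    task_name="scitail",                # placeholder task name
    label_list=["entails", "neutral"],  # required for classification tasks
    per_device_train_batch_size=32,
)
```

Any keyword argument whose name matches a field of the `FTModelArguments`, `FTDataArguments`, or `FTTrainingArguments` dataclasses is applied as an override; unknown keys are ignored.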
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/self-training-text-classification/requirements.txt
accelerate
datasets >= 1.8.0
protobuf
scikit-learn
scipy
sentencepiece != 0.1.92
torch >= 1.3
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/self-training-text-classification/selftraining.py
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Self-training for sequence classification.""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy logger = logging.getLogger(__name__) MODEL_BIN_FILE = "pytorch_model.bin" @dataclasses.dataclass class STModelArguments: """Arguments pertaining to which config/tokenizer/model we are going to fine-tune from.""" model_name_or_path: str = dataclasses.field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."} ) cache_dir: Optional[str] = dataclasses.field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."}, ) @dataclasses.dataclass class STDataArguments: """Arguments pertaining to what data we are going to input our model for training and evaluation.""" train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."}) infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."}) eval_file: Optional[str] = dataclasses.field( default=None, metadata={"help": "A csv or a json file containing the validation data."} ) task_name: Optional[str] = dataclasses.field( default=None, metadata={"help": "The name of the task to train on."}, ) label_list: Optional[List[str]] = dataclasses.field( default=None, metadata={"help": "The list of labels for the task."} ) @dataclasses.dataclass class STTrainingArguments: """Training arguments pertaining to the training loop itself.""" output_dir: str = dataclasses.field( metadata={"help": "The output directory where the model predictions and checkpoints will be written."} ) eval_metric: Optional[str] = dataclasses.field( default="accuracy", metadata={"help": "The evaluation metric used for the task."} ) eval_strategy: Optional[str] = dataclasses.field( default="no", metadata={ "help": 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' }, ) early_stopping_patience: Optional[int] = dataclasses.field( default=10, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) early_stopping_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={ "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions." 
}, ) do_filter_by_confidence: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."}, ) do_filter_by_val_performance: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."}, ) finetune_on_labeled_data: Optional[bool] = dataclasses.field( default=False, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."}, ) confidence_threshold: Optional[float] = dataclasses.field( default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}, ) max_selftrain_iterations: Optional[int] = dataclasses.field( default=100, metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."}, ) seed: Optional[int] = dataclasses.field( default=None, metadata={"help": "Random seed for initialization."}, ) def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir): """Create pseudeo labeled data for the next self-training iteration.""" dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1) if args.do_filter_by_confidence: dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 num_selected_rows = int(eval_result * len(dataset)) print(num_selected_rows) dataset = dataset.sort("probability", reverse=True) dataset = dataset.select(range(num_selected_rows)) dataset = dataset.remove_columns(["label", "probability"]) dataset = dataset.rename_column("prediction", "label") dataset = dataset.map(lambda example: {"label": id2label[example["label"]]}) dataset = dataset.shuffle(seed=args.seed) pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.data_file_extension == "csv": dataset.to_csv(pseudo_labeled_data_file, index=False) else: dataset.to_json(pseudo_labeled_data_file) def selftrain(model_name_or_path, train_file, infer_file, output_dir, **kwargs): """Self-training a pre-trained model on a downstream task. Args: model_name_or_path: Path to pretrained model or model identifier from huggingface.co/models. train_file: A csv or a json file containing the training data. infer_file: A csv or a json file containing the data to predict on. output_dir: The output directory where the model predictions and checkpoints will be written. **kwargs: Dictionary of key/value pairs with which to update the configuration object after loading. The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. """ # Initialize the accelerator. We will let the accelerator handle device # placement for us. accelerator = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. 
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() model_args = STModelArguments(model_name_or_path=model_name_or_path) data_args = STDataArguments(train_file=train_file, infer_file=infer_file) training_args = STTrainingArguments(output_dir=output_dir) args = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(arg_class).items(): setattr(args, key, value) for key, value in kwargs.items(): if hasattr(args, key): setattr(args, key, value) # Sanity checks data_files = {} args.data_file_extension = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None data_files["train"] = args.train_file data_files["infer"] = args.infer_file if args.eval_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None data_files["eval"] = args.eval_file for key in data_files: extension = data_files[key].split(".")[-1] assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file." if args.data_file_extension is None: args.data_file_extension = extension else: assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`." assert ( args.eval_metric in datasets.list_metrics() ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}." # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) logger.info("Creating the initial data directory for self-training...") data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format initial_data_dir = data_dir_format(0) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) os.makedirs(initial_data_dir, exist_ok=True) accelerator.wait_for_everyone() best_iteration = None best_eval_result = None early_stopping_patience_counter = 0 should_training_stop = False # Show the progress bar progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process) # Self-train for iteration in range(0, int(args.max_selftrain_iterations)): current_data_dir = data_dir_format(iteration) assert os.path.exists(current_data_dir) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 current_output_dir = os.path.join(current_data_dir, "stage-1") arguments_dict = { "accelerator": accelerator, "model_name_or_path": args.model_name_or_path, "cache_dir": args.cache_dir, "do_train": True, "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"], "do_eval": True if args.eval_file is not None else False, "eval_file": data_files["eval"], "do_predict": True, "infer_file": data_files["infer"], "task_name": args.task_name, "label_list": args.label_list, "output_dir": current_output_dir, "eval_metric": args.eval_metric, "eval_strategy": args.eval_strategy, "early_stopping_patience": args.early_stopping_patience, "early_stopping_threshold": args.early_stopping_threshold, "seed": args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(training_args, key): arguments_dict.update({key: value}) model_bin_file_path = os.path.join(current_output_dir, 
"best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data model_path = os.path.join(current_output_dir, "best-checkpoint") current_output_dir = os.path.join(current_data_dir, "stage-2") # Update arguments_dict arguments_dict["model_name_or_path"] = model_path arguments_dict["train_file"] = data_files["train"] arguments_dict["output_dir"] = current_output_dir model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", MODEL_BIN_FILE) if os.path.exists(model_bin_file_path): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.", model_bin_file_path, iteration, ) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration) finetune(**arguments_dict) accelerator.wait_for_everyone() assert os.path.exists(model_bin_file_path) logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration) new_iteration = iteration next_data_dir = data_dir_format(iteration + 1) config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint")) id2label = config.id2label eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json") test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json") assert os.path.exists(eval_results_file) with open(eval_results_file, "r") as f: eval_result = float(json.load(f)[args.eval_metric]) infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv") assert os.path.exists(infer_output_file) # Loading the dataset from local csv or json files. 
infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"] infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"] if accelerator.is_main_process: os.makedirs(next_data_dir, exist_ok=True) shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json")) if os.path.exists(test_results_file): shutil.copy(eval_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json")) create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) accelerator.wait_for_everyone() data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}") if args.eval_strategy != IntervalStrategy.NO.value: new_eval_result = eval_result if best_iteration is None: best_iteration = new_iteration best_eval_result = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter = 0 else: if new_eval_result == best_eval_result: best_iteration = new_iteration best_eval_result = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: should_training_stop = True progress_bar.update(1) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d", best_iteration) logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{iteration}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), ) else: # Assume that the last iteration is the best logger.info("Best iteration: %d", args.max_selftrain_iterations - 1) logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"), os.path.join(output_dir, "eval_results_best-iteration.json"), )
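A rough sketch (not the project's actual helper) of what the `do_filter_by_confidence` and `do_filter_by_val_performance` options in `STTrainingArguments` amount to, using the 🤗 Datasets API the same way `create_pseudo_labeled_data` above does; `dataset` is assumed to carry the `probability` column produced by inference with the best checkpoint:

```python
# Illustrative sketch of the two pseudo-label filtering strategies; names are hypothetical.
from datasets import Dataset


def filter_pseudo_labels(dataset: Dataset, confidence_threshold=None, eval_result=None) -> Dataset:
    if confidence_threshold is not None:
        # do_filter_by_confidence: drop examples whose confidence is at or below the threshold
        dataset = dataset.filter(lambda example: example["probability"] > confidence_threshold)
    if eval_result is not None:
        # do_filter_by_val_performance: keep only the most confident fraction,
        # sized by the current validation performance (e.g. 0.8 keeps the top 80%)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(int(eval_result * len(dataset))))
    return dataset
```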
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/self-training-text-classification/README.md
# Self-training

This is an implementation of the self-training algorithm (without task augmentation) in the [EMNLP 2021](https://2021.emnlp.org/) paper: [STraTA: Self-Training with Task Augmentation for Better Few-shot Learning](https://arxiv.org/abs/2109.06270). Please check out https://github.com/google-research/google-research/tree/master/STraTA for the original codebase.

**Note**: The code can be used as a tool for automatic data labeling.

## Table of Contents

* [Installation](#installation)
* [Self-training](#self-training)
  * [Running self-training with a base model](#running-self-training-with-a-base-model)
  * [Hyperparameters for self-training](#hyperparameters-for-self-training)
  * [Distributed training](#distributed-training)
* [Demo](#demo)
* [How to cite](#how-to-cite)

## Installation

This repository is tested on Python 3.8+, PyTorch 1.10+, and 🤗 Transformers 4.16+. You should install all necessary Python packages in a [virtual environment](https://docs.python.org/3/library/venv.html). If you are unfamiliar with Python virtual environments, please check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/).

Below, we create a virtual environment with the [Anaconda Python distribution](https://www.anaconda.com/products/distribution) and activate it.

```sh
conda create -n strata python=3.9
conda activate strata
```

Next, you need to install 🤗 Transformers. Please refer to the [🤗 Transformers installation page](https://github.com/huggingface/transformers#installation) for a detailed guide.

```sh
pip install transformers
```

Finally, install all necessary Python packages for our self-training algorithm.

```sh
pip install -r STraTA/selftraining/requirements.txt
```

This will install PyTorch as a backend.

## Self-training

### Running self-training with a base model

The following example code shows how to run our self-training algorithm with a base model (e.g., `BERT`) on the `SciTail` science entailment dataset, which has two classes `['entails', 'neutral']`. We assume that you have a data directory that includes some training data (e.g., `train.csv`), evaluation data (e.g., `eval.csv`), and unlabeled data (e.g., `infer.csv`).

```python
import os
from selftraining import selftrain

data_dir = '/path/to/your/data/dir'
parameters_dict = {
    'max_selftrain_iterations': 100,
    'model_name_or_path': '/path/to/your/base/model',  # could be the id of a model hosted by 🤗 Transformers
    'output_dir': '/path/to/your/output/dir',
    'train_file': os.path.join(data_dir, 'train.csv'),
    'infer_file': os.path.join(data_dir, 'infer.csv'),
    'eval_file': os.path.join(data_dir, 'eval.csv'),
    'eval_strategy': 'steps',
    'task_name': 'scitail',
    'label_list': ['entails', 'neutral'],
    'per_device_train_batch_size': 32,
    'per_device_eval_batch_size': 8,
    'max_length': 128,
    'learning_rate': 2e-5,
    'max_steps': 100000,
    'eval_steps': 1,
    'early_stopping_patience': 50,
    'overwrite_output_dir': True,
    'do_filter_by_confidence': False,
    # 'confidence_threshold': 0.3,
    'do_filter_by_val_performance': True,
    'finetune_on_labeled_data': False,
    'seed': 42,
}

selftrain(**parameters_dict)
```

**Note**: We checkpoint periodically during self-training. In case of preemptions, just re-run the above script and self-training will resume from the latest iteration.

### Hyperparameters for self-training

If you have development data, you might want to tune some hyperparameters for self-training. Below are hyperparameters that could provide additional gains for your task.
- `finetune_on_labeled_data`: If set to `True`, the resulting model from each self-training iteration is further fine-tuned on the original labeled data before the next self-training iteration. Intuitively, this would give the model a chance to "correct" itself after being trained on pseudo-labeled data.
- `do_filter_by_confidence`: If set to `True`, the pseudo-labeled data in each self-training iteration is filtered based on the model confidence. For instance, if `confidence_threshold` is set to `0.3`, pseudo-labeled examples with a confidence score less than or equal to `0.3` will be discarded. Note that `confidence_threshold` should be greater than or equal to `1/num_labels`, where `num_labels` is the number of class labels. Filtering out the lowest-confidence pseudo-labeled examples could be helpful in some cases.
- `do_filter_by_val_performance`: If set to `True`, the pseudo-labeled data in each self-training iteration is filtered based on the current validation performance. For instance, if your validation performance is 80% accuracy, you might want to get rid of 20% of the pseudo-labeled data with the lowest confidence scores.

### Distributed training

We strongly recommend distributed training with multiple accelerators. To activate distributed training, please try one of the following methods:

1. Run `accelerate config` and answer the questions asked. This will save a `default_config.yaml` file in your cache folder for 🤗 Accelerate. Now, you can run your script with the following command:

```sh
accelerate launch your_script.py --args_to_your_script
```

2. Run your script with the following command:

```sh
python -m torch.distributed.launch --nnodes="${NUM_NODES}" --nproc_per_node="${NUM_TRAINERS}" your_script.py --args_to_your_script
```

3. Run your script with the following command:

```sh
torchrun --nnodes="${NUM_NODES}" --nproc_per_node="${NUM_TRAINERS}" your_script.py --args_to_your_script
```

## Demo

Please check out `run.sh` to see how to perform our self-training algorithm with a `BERT` Base model on the SciTail science entailment dataset using 8 labeled examples per class. You can configure your training environment by specifying `NUM_NODES` and `NUM_TRAINERS` (number of processes per node). To launch the script, simply run `source run.sh`.

## How to cite

If you extend or use this code, please cite the [paper](https://arxiv.org/abs/2109.06270) where it was introduced:

```bibtex
@inproceedings{vu-etal-2021-strata,
    title = "{ST}ra{TA}: Self-Training with Task Augmentation for Better Few-shot Learning",
    author = "Vu, Tu and Luong, Minh-Thang and Le, Quoc and Simon, Grady and Iyyer, Mohit",
    booktitle = "Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing",
    month = nov,
    year = "2021",
    address = "Online and Punta Cana, Dominican Republic",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.emnlp-main.462",
    doi = "10.18653/v1/2021.emnlp-main.462",
    pages = "5715--5731",
}
```
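At each iteration, the self-training loop copies the best checkpoint's evaluation results to `eval_results_iter-{iteration}.json` in the output directory, which makes it easy to track progress across iterations. A small, hypothetical snippet (not part of the project) for doing so; the output path is a placeholder and the `accuracy` key assumes the default `eval_metric`:

```python
# Sketch: print the tracked metric from each per-iteration results file written by selftrain().
import glob
import json
import os

output_dir = "/path/to/your/output/dir"  # placeholder
for results_file in sorted(glob.glob(os.path.join(output_dir, "eval_results_iter-*.json"))):
    with open(results_file) as f:
        results = json.load(f)
    print(results_file, "->", results.get("accuracy"))
```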
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/self-training-text-classification/run.sh
# Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #!/bin/bash # Create a virtual environment conda deactivate conda update conda -y conda update anaconda -y pip install --upgrade pip python3 -m pip install --user virtualenv conda create -n strata python=3.9 -y conda activate strata # Install all necessary packages pip install transformers pip install -r requirements.txt # Download and prepare data WORK_DIR="/tmp/strata" rm -rf "${WORK_DIR}" && mkdir -p "${WORK_DIR}" wget https://storage.googleapis.com/gresearch/strata/demo.zip -P "${WORK_DIR}" DEMO_ZIP_FILE="${WORK_DIR}/demo.zip" unzip "${DEMO_ZIP_FILE}" -d "${WORK_DIR}" && rm "${DEMO_ZIP_FILE}" DATA_DIR="${WORK_DIR}/demo/scitail-8" OUTPUT_DIR="/tmp/output" rm -rf "${OUTPUT_DIR}" && mkdir -p "${OUTPUT_DIR}" # Specific hyperparameters MODEL_NAME_OR_PATH="bert-base-uncased" NUM_NODES=1 NUM_TRAINERS=4 LAUNCH_SCRIPT="torchrun --nnodes='${NUM_NODES}' --nproc_per_node='${NUM_TRAINERS}' python -c" MAX_SELFTRAIN_ITERATIONS=100 TRAIN_FILE="train.csv" INFER_FILE="infer.csv" EVAL_FILE="eval_256.csv" MAX_STEPS=100000 # Start self-training ${LAUNCH_SCRIPT} " import os from selftraining import selftrain data_dir = '${DATA_DIR}' parameters_dict = { 'max_selftrain_iterations': ${MAX_SELFTRAIN_ITERATIONS}, 'model_name_or_path': '${MODEL_NAME_OR_PATH}', 'output_dir': '${OUTPUT_DIR}', 'train_file': os.path.join(data_dir, '${TRAIN_FILE}'), 'infer_file': os.path.join(data_dir, '${INFER_FILE}'), 'eval_file': os.path.join(data_dir, '${EVAL_FILE}'), 'eval_strategy': 'steps', 'task_name': 'scitail', 'label_list': ['entails', 'neutral'], 'per_device_train_batch_size': 32, 'per_device_eval_batch_size': 8, 'max_length': 128, 'learning_rate': 2e-5, 'max_steps': ${MAX_STEPS}, 'eval_steps': 1, 'early_stopping_patience': 50, 'overwrite_output_dir': True, 'do_filter_by_confidence': False, 'do_filter_by_val_performance': True, 'finetune_on_labeled_data': False, 'seed': 42, } selftrain(**parameters_dict) "
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/requirements.txt
appdirs==1.4.3 argon2-cffi==20.1.0 async-generator==1.10 attrs==20.2.0 backcall==0.2.0 CacheControl==0.12.6 certifi==2023.7.22 cffi==1.14.2 chardet==3.0.4 click==7.1.2 colorama==0.4.3 contextlib2==0.6.0 cycler==0.10.0 datasets==1.0.0 decorator==4.4.2 defusedxml==0.6.0 dill==0.3.2 distlib==0.3.0 distro==1.4.0 entrypoints==0.3 filelock==3.0.12 future==0.18.3 html5lib==1.0.1 idna==2.8 ipaddr==2.2.0 ipykernel==5.3.4 ipython ipython-genutils==0.2.0 ipywidgets==7.5.1 jedi==0.17.2 Jinja2>=2.11.3 joblib==1.2.0 jsonschema==3.2.0 jupyter==1.0.0 jupyter-client==6.1.7 jupyter-console==6.2.0 jupyter-core==4.6.3 jupyterlab-pygments==0.1.1 kiwisolver==1.2.0 lockfile==0.12.2 MarkupSafe==1.1.1 matplotlib==3.3.1 mistune==2.0.3 msgpack==0.6.2 nbclient==0.5.0 nbconvert==6.5.1 nbformat==5.0.7 nest-asyncio==1.4.0 notebook==6.4.12 numpy==1.22.0 opencv-python==4.4.0.42 packaging==20.3 pandas==1.1.2 pandocfilters==1.4.2 parso==0.7.1 pep517==0.8.2 pexpect==4.8.0 pickleshare==0.7.5 Pillow>=8.1.1 progress==1.5 prometheus-client==0.8.0 prompt-toolkit==3.0.7 ptyprocess==0.6.0 pyaml==20.4.0 pyarrow==15.0.0 pycparser==2.20 Pygments>=2.7.4 pyparsing==2.4.6 pyrsistent==0.16.0 python-dateutil==2.8.1 pytoml==0.1.21 pytz==2020.1 PyYAML>=5.4 pyzmq==19.0.2 qtconsole==4.7.7 QtPy==1.9.0 regex==2020.7.14 requests==2.31.0 retrying==1.3.3 sacremoses==0.0.43 Send2Trash==1.5.0 sentencepiece==0.1.91 six==1.14.0 terminado==0.8.3 testpath==0.4.4 tokenizers==0.8.1rc2 torch==1.13.1 torchvision==0.7.0 tornado==6.3.3 tqdm==4.48.2 traitlets git+https://github.com/huggingface/transformers.git urllib3==1.26.18 wcwidth==0.2.5 webencodings==0.5.1 wget==3.2 widgetsnbextension==3.5.1 xxhash==2.0.0
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/utils.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :) Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cv2 import numpy as np import requests import wget from filelock import FileLock from huggingface_hub.utils import insecure_hashlib from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) default_cache_path = os.path.join(torch_cache_home, "transformers") CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) CONFIG = os.path.join(PATH, "config.yaml") ATTRIBUTES = os.path.join(PATH, "attributes.txt") OBJECTS = os.path.join(PATH, "objects.txt") PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) WEIGHTS_NAME = "pytorch_model.bin" CONFIG_NAME = "config.yaml" def load_labels(objs=OBJECTS, attrs=ATTRIBUTES): vg_classes = [] with open(objs) as f: for object in f.readlines(): vg_classes.append(object.split(",")[0].lower().strip()) vg_attrs = [] with open(attrs) as f: for object in f.readlines(): vg_attrs.append(object.split(",")[0].lower().strip()) return vg_classes, vg_attrs def load_checkpoint(ckp): r = OrderedDict() with open(ckp, "rb") as f: ckp = pkl.load(f)["model"] for k in copy.deepcopy(list(ckp.keys())): v = ckp.pop(k) if isinstance(v, np.ndarray): v = torch.tensor(v) else: assert isinstance(v, torch.tensor), type(v) r[k] = v return r class Config: _pointer = {} def __init__(self, dictionary: dict, name: str = "root", level=0): self._name = name self._level = level d = {} for k, v in dictionary.items(): if v is None: raise ValueError() k = copy.deepcopy(k) v = copy.deepcopy(v) if isinstance(v, dict): v = Config(v, name=k, level=level + 1) d[k] = v setattr(self, k, v) self._pointer = d def __repr__(self): return str(list((self._pointer.keys()))) def __setattr__(self, key, val): self.__dict__[key] = val self.__dict__[key.upper()] = val levels = key.split(".") last_level = len(levels) - 1 pointer = self._pointer if len(levels) > 1: for i, l in enumerate(levels): if hasattr(self, l) and 
isinstance(getattr(self, l), Config): setattr(getattr(self, l), ".".join(levels[i:]), val) if l == last_level: pointer[l] = val else: pointer = pointer[l] def to_dict(self): return self._pointer def dump_yaml(self, data, file_name): with open(f"{file_name}", "w") as stream: dump(data, stream) def dump_json(self, data, file_name): with open(f"{file_name}", "w") as stream: json.dump(data, stream) @staticmethod def load_yaml(config): with open(config) as stream: data = load(stream, Loader=Loader) return data def __str__(self): t = " " if self._name != "root": r = f"{t * (self._level-1)}{self._name}:\n" else: r = "" level = self._level for i, (k, v) in enumerate(self._pointer.items()): if isinstance(v, Config): r += f"{t * (self._level)}{v}\n" self._level += 1 else: r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n" self._level = level return r[:-1] @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) return cls(config_dict) @classmethod def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path else: config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) # Load config dict if resolved_config_file is None: raise EnvironmentError config_file = Config.load_yaml(resolved_config_file) except EnvironmentError: msg = "Can't load config for" raise EnvironmentError(msg) if resolved_config_file == config_file: print("loading configuration file from path") else: print("loading configuration file cache") return Config.load_yaml(resolved_config_file), kwargs # quick compare tensors def compare(in_tensor): out_tensor = torch.load("dump.pt", map_location=in_tensor.device) n1 = in_tensor.numpy() n2 = out_tensor.numpy()[0] print(n1.shape, n1[0, 0, :5]) print(n2.shape, n2[0, 0, :5]) assert np.allclose(n1, n2, rtol=0.01, atol=0.1), ( f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %" " element-wise mismatch" ) raise Exception("tensors are all good") # Hugging face functions below def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX legacy_format = "/" not in model_id if legacy_format: return f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def http_get( url, temp_file, proxies=None, resume_size=0, user_agent=None, ): ua = "python/{}".format(sys.version.split()[0]) if _torch_available: ua += "; torch/{}".format(torch.__version__) if isinstance(user_agent, dict): ua += "; " + "; ".join("{}/{}".format(k, v) for k, v 
in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent headers = {"user-agent": ua} if resume_size > 0: headers["Range"] = "bytes=%d-" % (resume_size,) response = requests.get(url, stream=True, proxies=proxies, headers=headers) if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None progress = tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", ) for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) etag = None if not local_files_only: try: response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) if response.status_code == 200: etag = response.headers.get("ETag") except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(cache_path): return cache_path else: matching_files = [ file for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*") if not file.endswith(".json") and not file.endswith(".lock") ] if len(matching_files) > 0: return os.path.join(cache_dir, matching_files[-1]) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # If the download just completed while the lock was activated. if os.path.exists(cache_path) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: incomplete_path = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(incomplete_path, "a+b") as f: yield f temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) resume_size = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, ) http_get( url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, ) os.replace(temp_file.name, cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: json.dump(meta, meta_file) return cache_path def url_to_filename(url, etag=None): url_bytes = url.encode("utf-8") url_hash = insecure_hashlib.sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = insecure_hashlib.sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".h5"): filename += ".h5" return filename def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' 
in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) return output_path_extracted return output_path def get_data(query, delim=","): assert isinstance(query, str) if os.path.isfile(query): with open(query) as f: data = eval(f.read()) else: req = requests.get(query) try: data = requests.json() except Exception: data = req.content.decode() assert data is not None, "could not connect" try: data = eval(data) except Exception: data = data.split("\n") req.close() return data def get_image_from_url(url): response = requests.get(url) img = np.array(Image.open(BytesIO(response.content))) return img # to load legacy frcnn checkpoint from detectron def load_frcnn_pkl_from_url(url): fn = url.split("/")[-1] if fn not in os.listdir(os.getcwd()): wget.download(url) with open(fn, "rb") as stream: weights = pkl.load(stream) model = weights.pop("model") new = {} for k, v in model.items(): new[k] = torch.from_numpy(v) if "running_var" in k: zero = torch.tensor([0]) k2 = k.replace("running_var", "num_batches_tracked") new[k2] = zero return new def get_demo_path(): print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb") def img_tensorize(im, input_format="RGB"): assert isinstance(im, str) if os.path.isfile(im): img = cv2.imread(im) else: img = get_image_from_url(im) assert img is not None, f"could not connect to: {im}" img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) if input_format == "RGB": img = img[:, :, ::-1] return img def chunk(images, batch=1): return (images[i : i + batch] for i in range(0, len(images), batch))
0
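A minimal, standalone sketch of the cache-naming scheme implemented by `url_to_filename` in the utils module above: the filename is the sha256 hex digest of the URL, with the ETag's digest appended when one is available, so a changed remote file gets a fresh cache entry. Plain `hashlib` stands in for the `insecure_hashlib` wrapper, and the example URL/ETag values are made up.

import hashlib

def cache_filename(url: str, etag: str = None) -> str:
    # sha256 of the URL gives the base filename
    name = hashlib.sha256(url.encode("utf-8")).hexdigest()
    # appending the ETag digest invalidates the cache when the remote file changes
    if etag:
        name += "." + hashlib.sha256(etag.encode("utf-8")).hexdigest()
    return name

print(cache_filename("https://huggingface.co/unc-nlp/frcnn-vg-finetuned/config.yaml", etag='"abc123"'))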
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/visualizing_image.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import colorsys import io import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import torch from matplotlib.backends.backend_agg import FigureCanvasAgg from utils import img_tensorize _SMALL_OBJ = 1000 class SingleImageViz: def __init__( self, img, scale=1.2, edgecolor="g", alpha=0.5, linestyle="-", saveas="test_out.jpg", rgb=True, pynb=False, id2obj=None, id2attr=None, pad=0.7, ): """ img: an RGB image of shape (H, W, 3). """ if isinstance(img, torch.Tensor): img = img.numpy().astype("np.uint8") if isinstance(img, str): img = img_tensorize(img) assert isinstance(img, np.ndarray) width, height = img.shape[1], img.shape[0] fig = mplfigure.Figure(frameon=False) dpi = fig.get_dpi() width_in = (width * scale + 1e-2) / dpi height_in = (height * scale + 1e-2) / dpi fig.set_size_inches(width_in, height_in) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") ax.set_xlim(0.0, width) ax.set_ylim(height) self.saveas = saveas self.rgb = rgb self.pynb = pynb self.img = img self.edgecolor = edgecolor self.alpha = 0.5 self.linestyle = linestyle self.font_size = int(np.sqrt(min(height, width)) * scale // 3) self.width = width self.height = height self.scale = scale self.fig = fig self.ax = ax self.pad = pad self.id2obj = id2obj self.id2attr = id2attr self.canvas = FigureCanvasAgg(fig) def add_box(self, box, color=None): if color is None: color = self.edgecolor (x0, y0, x1, y1) = box width = x1 - x0 height = y1 - y0 self.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=color, linewidth=self.font_size // 3, alpha=self.alpha, linestyle=self.linestyle, ) ) def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None): if len(boxes.shape) > 2: boxes = boxes[0] if len(obj_ids.shape) > 1: obj_ids = obj_ids[0] if len(obj_scores.shape) > 1: obj_scores = obj_scores[0] if len(attr_ids.shape) > 1: attr_ids = attr_ids[0] if len(attr_scores.shape) > 1: attr_scores = attr_scores[0] if isinstance(boxes, torch.Tensor): boxes = boxes.numpy() if isinstance(boxes, list): boxes = np.array(boxes) assert isinstance(boxes, np.ndarray) areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) sorted_idxs = np.argsort(-areas).tolist() boxes = boxes[sorted_idxs] if boxes is not None else None obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))] assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] if obj_ids is not None: labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores) for i in range(len(boxes)): color = assigned_colors[i] 
self.add_box(boxes[i], color) self.draw_labels(labels[i], boxes[i], color) def draw_labels(self, label, box, color): x0, y0, x1, y1 = box text_pos = (x0, y0) instance_area = (y1 - y0) * (x1 - x0) small = _SMALL_OBJ * self.scale if instance_area < small or y1 - y0 < 40 * self.scale: if y1 >= self.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.height * self.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) font_size *= 0.75 * self.font_size self.draw_text( text=label, position=text_pos, color=lighter_color, ) def draw_text( self, text, position, color="g", ha="left", ): rotation = 0 font_size = self.font_size color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) bbox = { "facecolor": "black", "alpha": self.alpha, "pad": self.pad, "edgecolor": "none", } x, y = position self.ax.text( x, y, text, size=font_size * self.scale, family="sans-serif", bbox=bbox, verticalalignment="top", horizontalalignment=ha, color=color, zorder=10, rotation=rotation, ) def save(self, saveas=None): if saveas is None: saveas = self.saveas if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"): cv2.imwrite( saveas, self._get_buffer()[:, :, ::-1], ) else: self.fig.savefig(saveas) def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores): labels = [self.id2obj[i] for i in classes] attr_labels = [self.id2attr[i] for i in attr_classes] labels = [ f"{label} {score:.2f} {attr} {attr_score:.2f}" for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores) ] return labels def _create_text_labels(self, classes, scores): labels = [self.id2obj[i] for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)] return labels def _random_color(self, maximum=255): idx = np.random.randint(0, len(_COLORS)) ret = _COLORS[idx] * maximum if not self.rgb: ret = ret[::-1] return ret def _get_buffer(self): if not self.pynb: s, (width, height) = self.canvas.print_to_buffer() if (width, height) != (self.width, self.height): img = cv2.resize(self.img, (width, height)) else: img = self.img else: buf = io.BytesIO() # works for cairo backend self.canvas.print_rgba(buf) width, height = self.width, self.height s = buf.getvalue() img = self.img buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) try: import numexpr as ne # fuse them with numexpr visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") except ImportError: alpha = alpha.astype("float32") / 255.0 visualized_image = img * (1 - alpha) + rgb * alpha return visualized_image.astype("uint8") def _change_color_brightness(self, color, brightness_factor): assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color # Color map _COLORS = ( np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 
0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000, ] ) .astype(np.float32) .reshape(-1, 3) )
0
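A small standalone sketch of the HLS-based brightness adjustment used by `_change_color_brightness` in the visualizer above: the lightness channel is scaled by (1 + brightness_factor) and clamped to [0, 1], which is how the lighter label color is derived from a box color. The function and variable names here are illustrative, not part of the original class.

import colorsys
import matplotlib.colors as mplc

def change_brightness(color, brightness_factor):
    # accept any matplotlib color spec, convert to RGB, then to HLS
    r, g, b = mplc.to_rgb(color)
    h, l, s = colorsys.rgb_to_hls(r, g, b)
    # scale lightness and clamp to the valid [0, 1] range
    l = min(max(l + brightness_factor * l, 0.0), 1.0)
    return colorsys.hls_to_rgb(h, l, s)

print(change_brightness("g", 0.7))               # lighter green, as used for label text
print(change_brightness((1.0, 0.0, 0.0), -0.5))  # darker red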
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/extracting_data.py
import getopt import json import os # import numpy as np import sys from collections import OrderedDict import datasets import numpy as np import torch from modeling_frcnn import GeneralizedRCNN from processing_image import Preprocess from utils import Config """ USAGE: ``python extracting_data.py -i <img_dir> -o <dataset_file>.datasets <batch_size>`` """ TEST = False CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") DEFAULT_SCHEMA = datasets.Features( OrderedDict( { "attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"), "img_id": datasets.Value("int32"), "obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"), "sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")), "preds_per_image": datasets.Value(dtype="int32"), } ) ) class Extract: def __init__(self, argv=sys.argv[1:]): inputdir = None outputfile = None subset_list = None batch_size = 1 opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="]) for opt, arg in opts: if opt in ("-i", "--inputdir"): inputdir = arg elif opt in ("-o", "--outfile"): outputfile = arg elif opt in ("-b", "--batch_size"): batch_size = int(arg) elif opt in ("-s", "--subset_list"): subset_list = arg assert inputdir is not None # and os.path.isdir(inputdir), f"{inputdir}" assert outputfile is not None and not os.path.isfile(outputfile), f"{outputfile}" if subset_list is not None: with open(os.path.realpath(subset_list)) as f: self.subset_list = {self._vqa_file_split()[0] for x in tryload(f)} else: self.subset_list = None self.config = CONFIG if torch.cuda.is_available(): self.config.model.device = "cuda" self.inputdir = os.path.realpath(inputdir) self.outputfile = os.path.realpath(outputfile) self.preprocess = Preprocess(self.config) self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config) self.batch = batch_size if batch_size != 0 else 1 self.schema = DEFAULT_SCHEMA def _vqa_file_split(self, file): img_id = int(file.split(".")[0].split("_")[-1]) filepath = os.path.join(self.inputdir, file) return (img_id, filepath) @property def file_generator(self): batch = [] for i, file in enumerate(os.listdir(self.inputdir)): if self.subset_list is not None and i not in self.subset_list: continue batch.append(self._vqa_file_split(file)) if len(batch) == self.batch: temp = batch batch = [] yield list(map(list, zip(*temp))) for i in range(1): yield list(map(list, zip(*batch))) def __call__(self): # make writer if not TEST: writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile) # do file generator for i, (img_ids, filepaths) in enumerate(self.file_generator): images, sizes, scales_yx = self.preprocess(filepaths) output_dict = self.model( images, sizes, scales_yx=scales_yx, padding="max_detections", max_detections=self.config.MAX_DETECTIONS, pad_value=0, return_tensors="np", location="cpu", ) output_dict["boxes"] = output_dict.pop("normalized_boxes") if not TEST: output_dict["img_id"] = np.array(img_ids) batch = self.schema.encode_batch(output_dict) writer.write_batch(batch) if TEST: break # finalizer the writer if not TEST: 
num_examples, num_bytes = writer.finalize() print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb") def tryload(stream): try: data = json.load(stream) try: data = list(data.keys()) except Exception: data = [d["img_id"] for d in data] except Exception: try: data = eval(stream.read()) except Exception: data = stream.read().split("\n") return data if __name__ == "__main__": extract = Extract(sys.argv[1:]) extract() if not TEST: dataset = datasets.Dataset.from_file(extract.outputfile) # wala! # print(np.array(dataset[0:2]["roi_features"]).shape)
0
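A toy sketch of the batching pattern used by `Extract.file_generator` above: (img_id, filepath) pairs are accumulated into fixed-size batches, and each batch is transposed with `zip(*batch)` so the ids and the paths come out as two parallel lists. The file names below are invented for illustration.

def batched_pairs(pairs, batch_size=2):
    batch = []
    for pair in pairs:
        batch.append(pair)
        if len(batch) == batch_size:
            # transpose [(id, path), ...] into ([ids...], [paths...])
            yield list(map(list, zip(*batch)))
            batch = []
    if batch:  # flush the final, possibly smaller batch
        yield list(map(list, zip(*batch)))

pairs = [(262567, "COCO_test2014_000000262567.jpg"), (1, "img_1.jpg"), (2, "img_2.jpg")]
for img_ids, filepaths in batched_pairs(pairs, batch_size=2):
    print(img_ids, filepaths)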
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/demo.ipynb
# %pip install-r requirements.txtfrom IPython.display import Image, display import PIL.Image import io import torch import numpy as np from processing_image import Preprocess from visualizing_image import SingleImageViz from modeling_frcnn import GeneralizedRCNN from utils import Config import utils from transformers import VisualBertForQuestionAnswering, BertTokenizerFast # URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg" URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg" OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt" ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt" VQA_URL = "https://dl.fbaipublicfiles.com/pythia/data/answers_vqa.txt" # for visualizing output def showarray(a, fmt="jpeg"): a = np.uint8(np.clip(a, 0, 255)) f = io.BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue()))# load object, attribute, and answer labels objids = utils.get_data(OBJ_URL) attrids = utils.get_data(ATTR_URL) vqa_answers = utils.get_data(VQA_URL)# load models and model components frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg) image_preprocess = Preprocess(frcnn_cfg) bert_tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") visualbert_vqa = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")# image viz frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids) # run frcnn images, sizes, scales_yx = image_preprocess(URL) output_dict = frcnn( images, sizes, scales_yx=scales_yx, padding="max_detections", max_detections=frcnn_cfg.max_detections, return_tensors="pt", ) # add boxes and labels to the image frcnn_visualizer.draw_boxes( output_dict.get("boxes"), output_dict.pop("obj_ids"), output_dict.pop("obj_probs"), output_dict.pop("attr_ids"), output_dict.pop("attr_probs"), ) showarray(frcnn_visualizer._get_buffer())# test_questions_for_url1 = [ # "Where is this scene?", # "what is the man riding?", # "What is the man wearing?", # "What is the color of the horse?" # ] test_questions_for_url2 = [ "Where is the cat?", "What is near the disk?", "What is the color of the table?", "What is the color of the cat?", "What is the shape of the monitor?", ] # Very important that the boxes are normalized # normalized_boxes = output_dict.get("normalized_boxes") features = output_dict.get("roi_features")for test_question in test_questions_for_url2: test_question = [test_question] inputs = bert_tokenizer( test_question, padding="max_length", max_length=20, truncation=True, return_token_type_ids=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) output_vqa = visualbert_vqa( input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, visual_embeds=features, visual_attention_mask=torch.ones(features.shape[:-1]), token_type_ids=inputs.token_type_ids, output_attentions=False, ) # get prediction pred_vqa = output_vqa["logits"].argmax(-1) print("Question:", test_question) print("prediction from VisualBert VQA:", vqa_answers[pred_vqa])
0
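A hedged sketch of the final prediction step in the notebook above: the VQA head produces one logit per answer in `vqa_answers`, and the predicted answer string is looked up at the argmax index. The answer list and logits below are random stand-in data, not real model output.

import torch

vqa_answers = ["yes", "no", "on the desk", "black"]  # stand-in answer vocabulary
logits = torch.randn(1, len(vqa_answers))            # stand-in for output_vqa["logits"]

pred_idx = logits.argmax(-1).item()                  # index of the highest-scoring answer
top2 = torch.topk(logits, k=2, dim=-1)               # optionally inspect the runners-up
print("prediction:", vqa_answers[pred_idx])
print("top-2 indices:", top2.indices.tolist())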
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/processing_image.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class ResizeShortestEdge: def __init__(self, short_edge_length, max_size=sys.maxsize): """ Args: short_edge_length (list[min, max]) max_size (int): maximum allowed longest edge length. """ self.interp_method = "bilinear" self.max_size = max_size self.short_edge_length = short_edge_length def __call__(self, imgs): img_augs = [] for img in imgs: h, w = img.shape[:2] # later: provide list and randomly choose index for resize size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) if size == 0: return img scale = size * 1.0 / min(h, w) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size if max(newh, neww) > self.max_size: scale = self.max_size * 1.0 / max(newh, neww) newh = newh * scale neww = neww * scale neww = int(neww + 0.5) newh = int(newh + 0.5) if img.dtype == np.uint8: pil_image = Image.fromarray(img) pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR) img = np.asarray(pil_image) else: img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw img = nn.functional.interpolate( img, (newh, neww), mode=self.interp_method, align_corners=False ).squeeze(0) img_augs.append(img) return img_augs class Preprocess: def __init__(self, cfg): self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST) self.input_format = cfg.INPUT.FORMAT self.size_divisibility = cfg.SIZE_DIVISIBILITY self.pad_value = cfg.PAD_VALUE self.max_image_size = cfg.INPUT.MAX_SIZE_TEST self.device = cfg.MODEL.DEVICE self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std def pad(self, images): max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) image_sizes = [im.shape[-2:] for im in images] images = [ nn.functional.pad( im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, ) for size, im in zip(image_sizes, images) ] return torch.stack(images), torch.tensor(image_sizes) def __call__(self, images, single_image=False): with torch.no_grad(): if not isinstance(images, list): images = [images] if single_image: assert len(images) == 1 for i in range(len(images)): if isinstance(images[i], torch.Tensor): images.insert(i, images.pop(i).to(self.device).float()) elif not isinstance(images[i], torch.Tensor): images.insert( i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format)) .to(self.device) .float(), ) # resize smallest edge raw_sizes = torch.tensor([im.shape[:2] for im in images]) images = 
self.aug(images) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic images = [self.normalizer(x) for x in images] # now pad them to do the following operations images, sizes = self.pad(images) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad scales_yx = torch.true_divide(raw_sizes, sizes) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _scale_box(boxes, scale_yx): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h)
0
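A standalone sketch of the shortest-edge resize arithmetic in `ResizeShortestEdge.__call__` above: scale the image so its short side reaches the target size, then rescale again if the long side would exceed `max_size`. The sizes used below are illustrative.

def resize_shortest_edge(h, w, size, max_size):
    # scale so the shorter side becomes `size`
    scale = size / min(h, w)
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    # shrink further if the longer side would exceed max_size
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

print(resize_shortest_edge(480, 640, size=800, max_size=1333))    # (800, 1067)
print(resize_shortest_edge(1080, 1920, size=800, max_size=1333))  # (750, 1333)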
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/README.md
# VisualBERT Demo

This demo shows the usage of the VisualBERT VQA model and is adapted from the LXMERT demo available [here](https://github.com/huggingface/transformers/blob/main/examples/research_projects/lxmert/demo.ipynb).

1. Create a virtualenv: ``virtualenv venv`` and activate it: ``source venv/bin/activate``
2. Install the requirements: ``pip install -r ./requirements.txt``
3. Usage is shown in demo.ipynb
0
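A quick, optional sanity check (not part of the original README) to confirm the environment installed in the steps above is usable before opening demo.ipynb:

import torch
import transformers

print("transformers:", transformers.__version__)
print("torch:", torch.__version__, "| CUDA available:", torch.cuda.is_available())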
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/visual_bert/modeling_frcnn.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 && Huggingface Co. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import itertools import math import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, namedtuple from typing import Dict, List, Tuple import numpy as np import torch from torch import nn from torch.nn.modules.batchnorm import BatchNorm2d from torchvision.ops import RoIPool from torchvision.ops.boxes import batched_nms, nms from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint # other: def norm_box(boxes, raw_sizes): if not isinstance(boxes, torch.Tensor): normalized_boxes = boxes.copy() else: normalized_boxes = boxes.clone() normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1] normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0] return normalized_boxes def pad_list_tensors( list_tensors, preds_per_image, max_detections=None, return_tensors=None, padding=None, pad_value=0, location=None, ): """ location will always be cpu for np tensors """ if location is None: location = "cpu" assert return_tensors in {"pt", "np", None} assert padding in {"max_detections", "max_batch", None} new = [] if padding is None: if return_tensors is None: return list_tensors elif return_tensors == "pt": if not isinstance(list_tensors, torch.Tensor): return torch.stack(list_tensors).to(location) else: return list_tensors.to(location) else: if not isinstance(list_tensors, list): return np.array(list_tensors.to(location)) else: return list_tensors.to(location) if padding == "max_detections": assert max_detections is not None, "specify max number of detections per batch" elif padding == "max_batch": max_detections = max(preds_per_image) for i in range(len(list_tensors)): too_small = False tensor_i = list_tensors.pop(0) if tensor_i.ndim < 2: too_small = True tensor_i = tensor_i.unsqueeze(-1) assert isinstance(tensor_i, torch.Tensor) tensor_i = nn.functional.pad( input=tensor_i, pad=(0, 0, 0, max_detections - preds_per_image[i]), mode="constant", value=pad_value, ) if too_small: tensor_i = tensor_i.squeeze(-1) if return_tensors is None: if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.tolist() if return_tensors == "np": if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.numpy() else: if location == "cpu": tensor_i = tensor_i.cpu() new.append(tensor_i) if return_tensors == "np": return np.stack(new, axis=0) elif return_tensors == "pt" and not isinstance(new, torch.Tensor): return torch.stack(new, dim=0) else: return list_tensors def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd): scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... 
boxes = boxes.reshape(-1, 4) _clip_box(boxes, image_shape) boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # Select max scores max_scores, max_classes = scores.max(1) # R x C --> R num_objs = boxes.size(0) boxes = boxes.view(-1, 4) idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes max_boxes = boxes[idxs] # Select max boxes according to the max scores. # Apply NMS keep = nms(max_boxes, max_scores, nms_thresh) keep = keep[:maxd] if keep.shape[-1] >= mind and keep.shape[-1] <= maxd: max_boxes, max_scores = max_boxes[keep], max_scores[keep] classes = max_classes[keep] return max_boxes, max_scores, classes, keep else: return None # Helper Functions def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h) def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor: widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def get_norm(norm, out_channels): if isinstance(norm, str): if len(norm) == 0: return None norm = { "BN": BatchNorm2d, "GN": lambda channels: nn.GroupNorm(32, channels), "nnSyncBN": nn.SyncBatchNorm, # keep for debugging "": lambda x: x, }[norm] return norm(out_channels) def _create_grid_offsets(size: List[int], stride: int, offset: float, device): grid_height, grid_width = size shifts_x = torch.arange( offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device, ) shifts_y = torch.arange( offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device, ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) return shift_x, shift_y def build_backbone(cfg): input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) norm = cfg.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, norm=norm, caffe_maxpool=cfg.MODEL.MAX_POOL, ) freeze_at = cfg.BACKBONE.FREEZE_AT if freeze_at >= 1: for p in stem.parameters(): p.requires_grad = False out_features = cfg.RESNETS.OUT_FEATURES depth = cfg.RESNETS.DEPTH num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.RESNETS.RES5_DILATION assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] stages = [] out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] max_stage_idx = max(out_stage_idx) for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "first_stride": first_stride, "in_channels": in_channels, "bottleneck_channels": bottleneck_channels, "out_channels": out_channels, "num_groups": num_groups, "norm": norm, "stride_in_1x1": stride_in_1x1, "dilation": dilation, } stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels 
*= 2 bottleneck_channels *= 2 if freeze_at >= stage_idx: for block in blocks: block.freeze() stages.append(blocks) return ResNet(stem, stages, out_features=out_features) def find_top_rpn_proposals( proposals, pred_objectness_logits, images, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_side_len, training, ): """Args: proposals (list[Tensor]): (L, N, Hi*Wi*A, 4). pred_objectness_logits: tensors of length L. nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): before nms post_nms_topk (int): after nms min_box_side_len (float): minimum proposal box side training (bool): True if proposals are to be used in training, Returns: results (List[Dict]): stores post_nms_topk object proposals for image i. """ num_images = len(images) device = proposals[0].device # 1. Select top-k anchor for every level and every image topk_scores = [] # #lvl Tensor, each of shape N x topk topk_proposals = [] level_ids = [] # #lvl Tensor, each of shape (topk,) batch_idx = torch.arange(num_images, device=device) for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits): Hi_Wi_A = logits_i.shape[1] num_proposals_i = min(pre_nms_topk, Hi_Wi_A) # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) logits_i, idx = logits_i.sort(descending=True, dim=1) topk_scores_i = logits_i[batch_idx, :num_proposals_i] topk_idx = idx[batch_idx, :num_proposals_i] # each is N x topk topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 topk_proposals.append(topk_proposals_i) topk_scores.append(topk_scores_i) level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) # 2. Concat all levels together topk_scores = torch.cat(topk_scores, dim=1) topk_proposals = torch.cat(topk_proposals, dim=1) level_ids = torch.cat(level_ids, dim=0) # if I change to batched_nms, I wonder if this will make a difference # 3. For each image, run a per-level NMS, and choose topk results. results = [] for n, image_size in enumerate(image_sizes): boxes = topk_proposals[n] scores_per_img = topk_scores[n] # I will have to take a look at the boxes clip method _clip_box(boxes, image_size) # filter empty boxes keep = _nonempty_boxes(boxes, threshold=min_box_side_len) lvl = level_ids if keep.sum().item() != len(boxes): boxes, scores_per_img, lvl = ( boxes[keep], scores_per_img[keep], level_ids[keep], ) keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh) keep = keep[:post_nms_topk] res = (boxes[keep], scores_per_img[keep]) results.append(res) # I wonder if it would be possible for me to pad all these things. return results def subsample_labels(labels, num_samples, positive_fraction, bg_label): """ Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. 
""" positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) negative = torch.nonzero(labels == bg_label).squeeze(1) num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def add_ground_truth_to_proposals(gt_boxes, proposals): raise NotImplementedError() def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): raise NotImplementedError() def _fmt_box_list(box_tensor, batch_index: int): repeated_index = torch.full( (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device, ) return torch.cat((repeated_index, box_tensor), dim=1) def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): pooler_fmt_boxes = torch.cat( [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], dim=0, ) return pooler_fmt_boxes def assign_boxes_to_levels( box_lists: List[torch.Tensor], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, ): box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists])) # Eqn.(1) in FPN paper level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)) # clamp level to (min, max), in case the box size is too large or too small # for the available feature maps level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) return level_assignments.to(torch.int64) - min_level # Helper Classes class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): def __new__(cls, *, channels=None, height=None, width=None, stride=None): return super().__new__(cls, channels, height, width, stride) class Box2BoxTransform(object): """ This R-CNN transformation scales the box's width and height by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). """ def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None): """ Args: weights (4-element tuple): Scaling factors that are applied to the (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set such that the deltas have unit variance; now they are treated as hyperparameters of the system. scale_clamp (float): When predicting deltas, the predicted box scaling factors (dw and dh) are clamped such that they are <= scale_clamp. """ self.weights = weights if scale_clamp is not None: self.scale_clamp = scale_clamp else: """ Value for clamping large dw and dh predictions. The heuristic is that we clamp such that dw and dh are no larger than what would transform a 16px box into a 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). """ self.scale_clamp = math.log(1000.0 / 16) def get_deltas(self, src_boxes, target_boxes): """ Get box regression transformation deltas (dx, dy, dw, dh) that can be used to transform the `src_boxes` into the `target_boxes`. 
That is, the relation ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless any delta is too large and is clamped). Args: src_boxes (Tensor): source boxes, e.g., object proposals target_boxes (Tensor): target of the transformation, e.g., ground-truth boxes. """ assert isinstance(src_boxes, torch.Tensor), type(src_boxes) assert isinstance(target_boxes, torch.Tensor), type(target_boxes) src_widths = src_boxes[:, 2] - src_boxes[:, 0] src_heights = src_boxes[:, 3] - src_boxes[:, 1] src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights target_widths = target_boxes[:, 2] - target_boxes[:, 0] target_heights = target_boxes[:, 3] - target_boxes[:, 1] target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights wx, wy, ww, wh = self.weights dx = wx * (target_ctr_x - src_ctr_x) / src_widths dy = wy * (target_ctr_y - src_ctr_y) / src_heights dw = ww * torch.log(target_widths / src_widths) dh = wh * torch.log(target_heights / src_heights) deltas = torch.stack((dx, dy, dw, dh), dim=1) assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" return deltas def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. deltas[i] represents k potentially different class-specific box transformations for the single box boxes[i]. boxes (Tensor): boxes to transform, of shape (N, 4) """ boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(deltas) pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 return pred_boxes class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__( self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False, ): """ Args: thresholds (list): a list of thresholds used to stratify predictions into levels. labels (list): a list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. 
allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold. For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ thresholds = thresholds[:] assert thresholds[0] > 0 thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) assert all(label_i in [-1, 0, 1] for label_i in labels) assert len(labels) == len(thresholds) - 1 self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of Faster R-CNN. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. # Note that the matches qualities must be positive due to the use of # `torch.nonzero`. 
of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None] if of_quality_inds.dim() == 0: (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1) else: (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1) match_labels[pred_inds_with_highest_quality] = 1 class RPNOutputs(object): def __init__( self, box2box_transform, anchor_matcher, batch_size_per_image, positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, boundary_threshold=0, gt_boxes=None, smooth_l1_beta=0.0, ): """ Args: box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels. batch_size_per_image (int): number of proposals to sample when training positive_fraction (float): target fraction of sampled proposals that should be positive images (ImageList): :class:`ImageList` instance representing N input images pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, W) pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi) anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training. gt_boxes (list[Boxes], optional): A list of N elements. smooth_l1_beta (float): The transition point between L1 and L2 lossn. When set to 0, the loss becomes L1. When +inf, it is ignored """ self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.pred_objectness_logits = pred_objectness_logits self.pred_anchor_deltas = pred_anchor_deltas self.anchors = anchors self.gt_boxes = gt_boxes self.num_feature_maps = len(pred_objectness_logits) self.num_images = len(images) self.boundary_threshold = boundary_threshold self.smooth_l1_beta = smooth_l1_beta def _get_ground_truth(self): raise NotImplementedError() def predict_proposals(self): # pred_anchor_deltas: (L, N, ? Hi, Wi) # anchors:(N, L, -1, B) # here we loop over specific feature map, NOT images proposals = [] anchors = self.anchors.transpose(0, 1) for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): B = anchors_i.size(-1) N, _, Hi, Wi = pred_anchor_deltas_i.shape anchors_i = anchors_i.flatten(start_dim=0, end_dim=1) pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) # Append feature map proposals with shape (N, Hi*Wi*A, B) proposals.append(proposals_i.view(N, -1, B)) proposals = torch.stack(proposals) return proposals def predict_objectness_logits(self): """ Returns: pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A). 
""" pred_objectness_logits = [ # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) score.permute(0, 2, 3, 1).reshape(self.num_images, -1) for score in self.pred_objectness_logits ] return pred_objectness_logits # Main Classes class Conv2d(nn.Conv2d): def __init__(self, *args, **kwargs): norm = kwargs.pop("norm", None) activation = kwargs.pop("activation", None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): if x.numel() == 0 and self.training: assert not isinstance(self.norm, nn.SyncBatchNorm) if x.numel() == 0: assert not isinstance(self.norm, nn.GroupNorm) output_shape = [ (i + 2 * p - (di * (k - 1) + 1)) // s + 1 for i, p, di, k, s in zip( x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride, ) ] output_shape = [x.shape[0], self.weight.shape[0]] + output_shape empty = _NewEmptyTensorOp.apply(x, output_shape) if self.training: _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 return empty + _dummy else: return empty x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class LastLevelMaxPool(nn.Module): """ This module is used in the original FPN to generate a downsampled P6 feature from P5. """ def __init__(self): super().__init__() self.num_levels = 1 self.in_feature = "p5" def forward(self, x): return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)] class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. """ def __init__(self, in_channels, out_channels): super().__init__() self.num_levels = 2 self.in_feature = "res5" self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) def forward(self, c5): p6 = self.p6(c5) p7 = self.p7(nn.functional.relu(p6)) return [p6, p7] class BasicStem(nn.Module): def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): super().__init__() self.conv1 = Conv2d( in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels), ) self.caffe_maxpool = caffe_maxpool # use pad 1 instead of pad zero def forward(self, x): x = self.conv1(x) x = nn.functional.relu_(x) if self.caffe_maxpool: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True) else: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x @property def out_channels(self): return self.conv1.out_channels @property def stride(self): return 4 # = stride 2 conv -> stride 2 max pool class ResNetBlockBase(nn.Module): def __init__(self, in_channels, out_channels, stride): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def freeze(self): for p in self.parameters(): p.requires_grad = False return self class BottleneckBlock(ResNetBlockBase): def __init__( self, in_channels, out_channels, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 
else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) def forward(self, x): out = self.conv1(x) out = nn.functional.relu_(out) out = self.conv2(out) out = nn.functional.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = nn.functional.relu_(out) return out class Backbone(nn.Module, metaclass=ABCMeta): def __init__(self): super().__init__() @abstractmethod def forward(self): pass @property def size_divisibility(self): """ Some backbones require the input height and width to be divisible by a specific integer. This is typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. """ return 0 def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name], ) for name in self._out_features } @property def out_features(self): """deprecated""" return self._out_features @property def out_feature_strides(self): """deprecated""" return {f: self._out_feature_strides[f] for f in self._out_features} @property def out_feature_channels(self): """deprecated""" return {f: self._out_feature_channels[f] for f in self._out_features} class ResNet(Backbone): def __init__(self, stem, stages, num_classes=None, out_features=None): """ Args: stem (nn.Module): a stem module stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`. num_classes (None or int): if None, will not perform classification. out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in: "stem", "linear", or "res2" ... If None, will return the output of the last layer. """ super(ResNet, self).__init__() self.stem = stem self.num_classes = num_classes current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stages_and_names = [] for i, blocks in enumerate(stages): for block in blocks: assert isinstance(block, ResNetBlockBase), block curr_channels = block.out_channels stage = nn.Sequential(*blocks) name = "res" + str(i + 2) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in blocks]) ) self._out_feature_channels[name] = blocks[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(curr_channels, num_classes) # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "The 1000-way fully-connected layer is initialized by # drawing weights from a zero-mean Gaussian with std of 0.01." 
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"

        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))

    def forward(self, x):
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for stage, name in self.stages_and_names:
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name],
                stride=self._out_feature_strides[name],
            )
            for name in self._out_features
        }

    @staticmethod
    def make_stage(
        block_class,
        num_blocks,
        first_stride=None,
        *,
        in_channels,
        out_channels,
        **kwargs,
    ):
        """
        Usually, layers that produce the same feature map spatial size are defined as one
        "stage". Under such definition, stride_per_block[1:] should all be 1.
        """
        if first_stride is not None:
            assert "stride" not in kwargs and "stride_per_block" not in kwargs
            kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1)
        blocks = []
        for i in range(num_blocks):
            curr_kwargs = {}
            for k, v in kwargs.items():
                if k.endswith("_per_block"):
                    assert (
                        len(v) == num_blocks
                    ), f"Argument '{k}' of make_stage should have the same length as num_blocks={num_blocks}."
                    newk = k[: -len("_per_block")]
                    assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
                    curr_kwargs[newk] = v[i]
                else:
                    curr_kwargs[k] = v

            blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs))
            in_channels = out_channels

        return blocks


class ROIPooler(nn.Module):
    """
    Region of interest feature map pooler that supports pooling from one or more feature maps.
    """

    def __init__(
        self,
        output_size,
        scales,
        sampling_ratio,
        canonical_box_size=224,
        canonical_level=4,
    ):
        super().__init__()
        # assumption that stride is a power of 2.
min_level = -math.log2(scales[0]) max_level = -math.log2(scales[-1]) # a bunch of testing assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) assert len(scales) == max_level - min_level + 1, "not pyramid" assert 0 < min_level and min_level <= max_level if isinstance(output_size, int): output_size = (output_size, output_size) assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int) if len(scales) > 1: assert min_level <= canonical_level and canonical_level <= max_level assert canonical_box_size > 0 self.output_size = output_size self.min_level = int(min_level) self.max_level = int(max_level) self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales) self.canonical_level = canonical_level self.canonical_box_size = canonical_box_size def forward(self, feature_maps, boxes): """ Args: feature_maps: List[torch.Tensor(N,C,W,H)] box_lists: list[torch.Tensor]) Returns: A tensor of shape(N*B, Channels, output_size, output_size) """ x = list(feature_maps.values()) num_level_assignments = len(self.level_poolers) assert len(x) == num_level_assignments and len(boxes) == x[0].size(0) pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes) if num_level_assignments == 1: return self.level_poolers[0](x[0], pooler_fmt_boxes) level_assignments = assign_boxes_to_levels( boxes, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level, ) num_boxes = len(pooler_fmt_boxes) num_channels = x[0].shape[1] output_size = self.output_size[0] dtype, device = x[0].dtype, x[0].device output = torch.zeros( (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device, ) for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): inds = torch.nonzero(level_assignments == level).squeeze(1) pooler_fmt_boxes_level = pooler_fmt_boxes[inds] output[inds] = pooler(x_level, pooler_fmt_boxes_level) return output class ROIOutputs(object): def __init__(self, cfg, training=False): self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) self.training = training self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST self.min_detections = cfg.MIN_DETECTIONS self.max_detections = cfg.MAX_DETECTIONS nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST if not isinstance(nms_thresh, list): nms_thresh = [nms_thresh] self.nms_thresh = nms_thresh def _predict_boxes(self, proposals, box_deltas, preds_per_image): num_pred = box_deltas.size(0) B = proposals[0].size(-1) K = box_deltas.size(-1) // B box_deltas = box_deltas.view(num_pred * K, B) proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B) proposals = proposals.reshape(-1, B) boxes = self.box2box_transform.apply_deltas(box_deltas, proposals) return boxes.view(num_pred, K * B).split(preds_per_image, dim=0) def _predict_objs(self, obj_logits, preds_per_image): probs = nn.functional.softmax(obj_logits, dim=-1) probs = probs.split(preds_per_image, dim=0) return probs def _predict_attrs(self, attr_logits, preds_per_image): attr_logits = attr_logits[..., :-1].softmax(-1) attr_probs, attrs = attr_logits.max(-1) return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0) @torch.no_grad() def inference( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): # only the pred boxes is the preds_per_image = [p.size(0) for p in pred_boxes] boxes_all = self._predict_boxes(pred_boxes, 
box_deltas, preds_per_image) obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image) features = features.split(preds_per_image, dim=0) # fun for each image too, also I can experiment and do multiple images final_results = [] zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes) for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped): for nms_t in self.nms_thresh: outputs = do_nms( boxes, obj_scores, size, self.score_thresh, nms_t, self.min_detections, self.max_detections, ) if outputs is not None: max_boxes, max_scores, classes, ids = outputs break if scales is not None: scale_yx = scales[i] max_boxes[:, 0::2] *= scale_yx[1] max_boxes[:, 1::2] *= scale_yx[0] final_results.append( ( max_boxes, classes, max_scores, attrs[ids], attr_probs[ids], features[i][ids], ) ) boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results)) return boxes, classes, class_probs, attrs, attr_probs, roi_features def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes): pass def __call__( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): if self.training: raise NotImplementedError() return self.inference( obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=scales, ) class Res5ROIHeads(nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It contains logic of cropping the regions, extract per-region features (by the res-5 block in this case), and make per-region predictions. """ def __init__(self, cfg, input_shape): super().__init__() self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION self.in_features = cfg.ROI_HEADS.IN_FEATURES self.num_classes = cfg.ROI_HEADS.NUM_CLASSES self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT self.feature_strides = {k: v.stride for k, v in input_shape.items()} self.feature_channels = {k: v.channels for k, v in input_shape.items()} self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG self.stage_channel_factor = 2**3 # res5 is 8x res2 self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor # self.proposal_matcher = Matcher( # cfg.ROI_HEADS.IOU_THRESHOLDS, # cfg.ROI_HEADS.IOU_LABELS, # allow_low_quality_matches=False, # ) pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],) sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE use_attr = cfg.ROI_BOX_HEAD.ATTR num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS self.pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, ) self.res5 = self._build_res5_block(cfg) if not res5_halve: """ Modifications for VG in RoI heads: 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1 2. 
Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2) """ self.res5[0].conv1.stride = (1, 1) self.res5[0].shortcut.stride = (1, 1) for i in range(3): self.res5[i].conv2.padding = (2, 2) self.res5[i].conv2.dilation = (2, 2) self.box_predictor = FastRCNNOutputLayers( self.out_channels, self.num_classes, self.cls_agnostic_bbox_reg, use_attr=use_attr, num_attrs=num_attrs, ) def _build_res5_block(self, cfg): stage_channel_factor = self.stage_channel_factor # res5 is 8x res2 num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group * stage_channel_factor out_channels = self.out_channels stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 norm = cfg.RESNETS.NORM blocks = ResNet.make_stage( BottleneckBlock, 3, first_stride=2, in_channels=out_channels // 2, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, norm=norm, stride_in_1x1=stride_in_1x1, ) return nn.Sequential(*blocks) def _shared_roi_transform(self, features, boxes): x = self.pooler(features, boxes) return self.res5(x) def forward(self, features, proposal_boxes, gt_boxes=None): if self.training: """ see https://github.com/airsplay/py-bottom-up-attention/\ blob/master/detectron2/modeling/roi_heads/roi_heads.py """ raise NotImplementedError() assert not proposal_boxes[0].requires_grad box_features = self._shared_roi_transform(features, proposal_boxes) feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled class AnchorGenerator(nn.Module): """ For a set of image sizes and feature maps, computes a set of anchors. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() sizes = cfg.ANCHOR_GENERATOR.SIZES aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS self.strides = [x.stride for x in input_shape] self.offset = cfg.ANCHOR_GENERATOR.OFFSET assert 0.0 <= self.offset < 1.0, self.offset """ sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i 1. given in absolute lengths in units of the input image; 2. they do not dynamically scale if the input image size changes. aspect_ratios (list[list[float]]) strides (list[int]): stride of each input feature. """ self.num_features = len(self.strides) self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios)) self._spacial_feat_dim = 4 def _calculate_anchors(self, sizes, aspect_ratios): # If one size (or aspect ratio) is specified and there are multiple feature # maps, then we "broadcast" anchors of that single size (or aspect ratio) if len(sizes) == 1: sizes *= self.num_features if len(aspect_ratios) == 1: aspect_ratios *= self.num_features assert self.num_features == len(sizes) assert self.num_features == len(aspect_ratios) cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)] return cell_anchors @property def box_dim(self): return self._spacial_feat_dim @property def num_cell_anchors(self): """ Returns: list[int]: Each int is the number of anchors at every pixel location, on that feature map. 
""" return [len(cell_anchors) for cell_anchors in self.cell_anchors] def grid_anchors(self, grid_sizes): anchors = [] for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) return anchors def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): """ anchors are continuous geometric rectangles centered on one feature map point sample. We can later build the set of anchors for the entire feature map by tiling these tensors """ anchors = [] for size in sizes: area = size**2.0 for aspect_ratio in aspect_ratios: w = math.sqrt(area / aspect_ratio) h = aspect_ratio * w x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 anchors.append([x0, y0, x1, y1]) return nn.Parameter(torch.tensor(anchors)) def forward(self, features): """ Args: features List[torch.Tensor]: list of feature maps on which to generate anchors. Returns: torch.Tensor: a list of #image elements. """ num_images = features[0].size(0) grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps) return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0) class RPNHead(nn.Module): """ RPN classification and regression heads. Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas specifying how to deform each anchor into an object proposal. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() # Standard RPN is shared across levels: in_channels = [s.channels for s in input_shape] assert len(set(in_channels)) == 1, "Each level must have the same channel!" 
in_channels = in_channels[0] anchor_generator = AnchorGenerator(cfg, input_shape) num_cell_anchors = anchor_generator.num_cell_anchors box_dim = anchor_generator.box_dim assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors" num_cell_anchors = num_cell_anchors[0] if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1: hid_channels = in_channels else: hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS # Modifications for VG in RPN (modeling/proposal_generator/rpn.py) # Use hidden dim instead fo the same dim as Res4 (in_channels) # 3x3 conv for the hidden representation self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1) # 1x1 conv for predicting objectness logits self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1) # 1x1 conv for predicting box2box transform deltas self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1) for layer in [self.conv, self.objectness_logits, self.anchor_deltas]: nn.init.normal_(layer.weight, std=0.01) nn.init.constant_(layer.bias, 0) def forward(self, features): """ Args: features (list[Tensor]): list of feature maps """ pred_objectness_logits = [] pred_anchor_deltas = [] for x in features: t = nn.functional.relu(self.conv(x)) pred_objectness_logits.append(self.objectness_logits(t)) pred_anchor_deltas.append(self.anchor_deltas(t)) return pred_objectness_logits, pred_anchor_deltas class RPN(nn.Module): """ Region Proposal Network, introduced by the Faster R-CNN paper. """ def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): super().__init__() self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE self.in_features = cfg.RPN.IN_FEATURES self.nms_thresh = cfg.RPN.NMS_THRESH self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_fraction = cfg.RPN.POSITIVE_FRACTION self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA self.loss_weight = cfg.RPN.LOSS_WEIGHT self.pre_nms_topk = { True: cfg.RPN.PRE_NMS_TOPK_TRAIN, False: cfg.RPN.PRE_NMS_TOPK_TEST, } self.post_nms_topk = { True: cfg.RPN.POST_NMS_TOPK_TRAIN, False: cfg.RPN.POST_NMS_TOPK_TEST, } self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features]) self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS) self.anchor_matcher = Matcher( cfg.RPN.IOU_THRESHOLDS, cfg.RPN.IOU_LABELS, allow_low_quality_matches=True, ) self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features]) def training(self, images, image_shapes, features, gt_boxes): pass def inference(self, outputs, images, image_shapes, features, gt_boxes=None): outputs = find_top_rpn_proposals( outputs.predict_proposals(), outputs.predict_objectness_logits(), images, image_shapes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_side_len, self.training, ) results = [] for img in outputs: im_boxes, img_box_logits = img img_box_logits, inds = img_box_logits.sort(descending=True) im_boxes = im_boxes[inds] results.append((im_boxes, img_box_logits)) (proposal_boxes, logits) = tuple(map(list, zip(*results))) return proposal_boxes, logits def forward(self, images, image_shapes, features, gt_boxes=None): """ Args: images (torch.Tensor): input images of length `N` features (dict[str: Tensor]) gt_instances """ # features is dict, key = block level, v = feature_map features = [features[f] for f in self.in_features] pred_objectness_logits, pred_anchor_deltas = 
self.rpn_head(features) anchors = self.anchor_generator(features) outputs = RPNOutputs( self.box2box_transform, self.anchor_matcher, self.batch_size_per_image, self.positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, self.boundary_threshold, gt_boxes, self.smooth_l1_beta, ) # For RPN-only models, the proposals are the final output if self.training: raise NotImplementedError() return self.training(outputs, images, image_shapes, features, gt_boxes) else: return self.inference(outputs, images, image_shapes, features, gt_boxes) class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__( self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, use_attr=False, num_attrs=-1, ): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int) cls_agnostic_bbox_reg (bool) box_dim (int) """ super().__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) # (do + 1 for background class) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.use_attr = use_attr if use_attr: """ Modifications for VG in RoI heads Embedding: {num_classes + 1} --> {input_size // 8} Linear: {input_size + input_size // 8} --> {input_size // 4} Linear: {input_size // 4} --> {num_attrs + 1} """ self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8) self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4) self.attr_score = nn.Linear(input_size // 4, num_attrs + 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for item in [self.cls_score, self.bbox_pred]: nn.init.constant_(item.bias, 0) def forward(self, roi_features): if roi_features.dim() > 2: roi_features = torch.flatten(roi_features, start_dim=1) scores = self.cls_score(roi_features) proposal_deltas = self.bbox_pred(roi_features) if self.use_attr: _, max_class = scores.max(-1) # [b, c] --> [b] cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256] roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304] roi_features = self.fc_attr(roi_features) roi_features = nn.functional.relu(roi_features) attr_scores = self.attr_score(roi_features) return scores, attr_scores, proposal_deltas else: return scores, proposal_deltas class GeneralizedRCNN(nn.Module): def __init__(self, cfg): super().__init__() self.device = torch.device(cfg.MODEL.DEVICE) self.backbone = build_backbone(cfg) self.proposal_generator = RPN(cfg, self.backbone.output_shape()) self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape()) self.roi_outputs = ROIOutputs(cfg) self.to(self.device) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, Config): config_path = config if config is not None else pretrained_model_name_or_path # try: config = 
Config.from_pretrained( config_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, ) # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} ".format( WEIGHTS_NAME, pretrained_model_name_or_path, ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert from_tf, "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=WEIGHTS_NAME, use_cdn=use_cdn, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = f"Can't load weights for '{pretrained_model_name_or_path}'." raise EnvironmentError(msg) if resolved_archive_file == archive_file: print("loading weights file {}".format(archive_file)) else: print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config) if state_dict is None: try: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") except Exception: state_dict = load_checkpoint(resolved_archive_file) except Exception: raise OSError( "Unable to load weights from pytorch checkpoint file. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. 
" ) missing_keys = [] unexpected_keys = [] error_msgs = [] # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata model_to_load = model model_to_load.load_state_dict(state_dict) if model.__class__.__name__ != model_to_load.__class__.__name__: base_model_state_dict = model_to_load.state_dict().keys() head_model_state_dict_without_base_prefix = [ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() ] missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) if len(unexpected_keys) > 0: print( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: print( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: print( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." 
) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(error_msgs) ) ) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() return model def forward( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): """ kwargs: max_detections (int), return_tensors {"np", "pt", None}, padding {None, "max_detections"}, pad_value (int), location = {"cuda", "cpu"} """ if self.training: raise NotImplementedError() return self.inference( images=images, image_shapes=image_shapes, gt_boxes=gt_boxes, proposals=proposals, scales_yx=scales_yx, **kwargs, ) @torch.no_grad() def inference( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): # run images through backbone original_sizes = image_shapes * scales_yx features = self.backbone(images) # generate proposals if none are available if proposals is None: proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes) else: assert proposals is not None # pool object features from either gt_boxes, or from proposals obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes) # prepare FRCNN Outputs and select top proposals boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs( obj_logits=obj_logits, attr_logits=attr_logits, box_deltas=box_deltas, pred_boxes=proposal_boxes, features=feature_pooled, sizes=image_shapes, scales=scales_yx, ) # will we pad??? subset_kwargs = { "max_detections": kwargs.get("max_detections", None), "return_tensors": kwargs.get("return_tensors", None), "pad_value": kwargs.get("pad_value", 0), "padding": kwargs.get("padding", None), } preds_per_image = torch.tensor([p.size(0) for p in boxes]) boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs) classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs) class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs) attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs) attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs) roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs) subset_kwargs["padding"] = None preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs) sizes = pad_list_tensors(image_shapes, None, **subset_kwargs) normalized_boxes = norm_box(boxes, original_sizes) return OrderedDict( { "obj_ids": classes, "obj_probs": class_probs, "attr_ids": attrs, "attr_probs": attr_probs, "boxes": boxes, "sizes": sizes, "preds_per_image": preds_per_image, "roi_features": roi_features, "normalized_boxes": normalized_boxes, } )
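For orientation, here is a minimal sketch (tensor shapes and values are invented and no checkpoint is loaded) of how the `OrderedDict` returned by `GeneralizedRCNN.inference` is typically consumed downstream, with `roi_features` serving as visual embeddings and `normalized_boxes` as box position features:

```python
from collections import OrderedDict

import torch

# Fake output mimicking the structure returned above: one image,
# 36 padded detections, 2048-d pooled RoI features (illustrative shapes only).
output_dict = OrderedDict(
    {
        "roi_features": torch.randn(1, 36, 2048),
        "normalized_boxes": torch.rand(1, 36, 4),
        "obj_ids": torch.randint(0, 1600, (1, 36)),
        "obj_probs": torch.rand(1, 36),
    }
)

visual_embeds = output_dict["roi_features"]    # visual tokens for a vision-language model
visual_pos = output_dict["normalized_boxes"]   # box coordinates scaled to [0, 1]
visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)

# torch.Size([1, 36, 2048]) torch.Size([1, 36, 4]) torch.Size([1, 36])
print(visual_embeds.shape, visual_pos.shape, visual_attention_mask.shape)
```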
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/adversarial/utils_hans.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) logger = logging.getLogger(__name__) @dataclass(frozen=True) class InputExample: """ A single training/test example for simple sequence classification. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. text_b: (Optional) string. The untokenized text of the second sequence. Only must be specified for sequence pair tasks. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. pairID: (Optional) string. Unique identifier for the pair of sentences. """ guid: str text_a: str text_b: Optional[str] = None label: Optional[str] = None pairID: Optional[str] = None @dataclass(frozen=True) class InputFeatures: """ A single set of features of data. Property names are the same names as the corresponding inputs to a model. Args: input_ids: Indices of input sequence tokens in the vocabulary. attention_mask: Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: Usually ``1`` for tokens that are NOT MASKED, ``0`` for MASKED (padded) tokens. token_type_ids: (Optional) Segment token indices to indicate first and second portions of the inputs. Only some models use them. label: (Optional) Label corresponding to the input. Int for classification problems, float for regression problems. pairID: (Optional) Unique identifier for the pair of sentences. """ input_ids: List[int] attention_mask: Optional[List[int]] = None token_type_ids: Optional[List[int]] = None label: Optional[Union[int, float]] = None pairID: Optional[int] = None if is_torch_available(): import torch from torch.utils.data import Dataset class HansDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = None, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() cached_features_file = os.path.join( data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task, ), ) label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not overwrite_cache: logger.info(f"Loading features from cached file {cached_features_file}") self.features = torch.load(cached_features_file) else: logger.info(f"Creating features from dataset file at {data_dir}") examples = ( processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) ) logger.info("Training examples: %s", len(examples)) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) logger.info("Saving features into cached file %s", cached_features_file) torch.save(self.features, cached_features_file) def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list if is_tf_available(): import tensorflow as tf class TFHansDataset: """ This will be superseded by a framework-agnostic approach soon. 
""" features: List[InputFeatures] def __init__( self, data_dir: str, tokenizer: PreTrainedTokenizer, task: str, max_seq_length: Optional[int] = 128, overwrite_cache=False, evaluate: bool = False, ): processor = hans_processors[task]() label_list = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] self.label_list = label_list examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir) self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d of %d" % (ex_index, len(examples))) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) self.dataset = tf.data.Dataset.from_generator( gen, ( { "example_id": tf.int32, "input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32, }, tf.int64, ), ( { "example_id": tf.TensorShape([]), "input_ids": tf.TensorShape([None, None]), "attention_mask": tf.TensorShape([None, None]), "token_type_ids": tf.TensorShape([None, None]), }, tf.TensorShape([]), ), ) def get_dataset(self): return self.dataset def __len__(self): return len(self.features) def __getitem__(self, i) -> InputFeatures: return self.features[i] def get_labels(self): return self.label_list class HansProcessor(DataProcessor): """Processor for the HANS data set.""" def get_train_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train") def get_dev_examples(self, data_dir): """See base class.""" return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev") def get_labels(self): """See base class. Note that we follow the standard three labels for MNLI (see :class:`~transformers.data.processors.utils.MnliProcessor`) but the HANS evaluation groups `contradiction` and `neutral` into `non-entailment` (label 0) while `entailment` is label 1.""" return ["contradiction", "entailment", "neutral"] def _create_examples(self, lines, set_type): """Creates examples for the training and dev sets.""" examples = [] for i, line in enumerate(lines): if i == 0: continue guid = "%s-%s" % (set_type, line[0]) text_a = line[5] text_b = line[6] pairID = line[7][2:] if line[7].startswith("ex") else line[7] label = line[0] examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID)) return examples def hans_convert_examples_to_features( examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer, ): """ Loads a data file into a list of ``InputFeatures`` Args: examples: List of ``InputExamples`` containing the examples. label_list: List of labels. Can be obtained from the processor using the ``processor.get_labels()`` method. max_length: Maximum example length. tokenizer: Instance of a tokenizer that will tokenize the examples. Returns: A list of task-specific ``InputFeatures`` which can be fed to the model. 
""" label_map = {label: i for i, label in enumerate(label_list)} features = [] for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"): if ex_index % 10000 == 0: logger.info("Writing example %d" % (ex_index)) inputs = tokenizer( example.text_a, example.text_b, add_special_tokens=True, max_length=max_length, padding="max_length", truncation=True, return_overflowing_tokens=True, ) label = label_map[example.label] if example.label in label_map else 0 pairID = int(example.pairID) features.append(InputFeatures(**inputs, label=label, pairID=pairID)) for i, example in enumerate(examples[:5]): logger.info("*** Example ***") logger.info(f"guid: {example}") logger.info(f"features: {features[i]}") return features hans_tasks_num_labels = { "hans": 3, } hans_processors = { "hans": HansProcessor, }
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/adversarial/run_hans.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for sequence classification on HANS.""" import logging import os from dataclasses import dataclass, field from typing import Dict, List, Optional import numpy as np import torch from utils_hans import HansDataset, InputFeatures, hans_processors, hans_tasks_num_labels import transformers from transformers import ( AutoConfig, AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: str = field( metadata={"help": "The name of the task to train selected in the list: " + ", ".join(hans_processors.keys())} ) data_dir: str = field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) def hans_data_collator(features: List[InputFeatures]) -> Dict[str, torch.Tensor]: """ Data collator that removes the "pairID" key if present. """ batch = default_data_collator(features) _ = batch.pop("pairID", None) return batch def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) try: num_labels = hans_tasks_num_labels[data_args.task_name] except KeyError: raise ValueError("Task not found: %s" % (data_args.task_name)) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) model = AutoModelForSequenceClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( HansDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, ) if training_args.do_train else None ) eval_dataset = ( HansDataset( data_dir=data_args.data_dir, tokenizer=tokenizer, task=data_args.task_name, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, evaluate=True, ) if training_args.do_eval else None ) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=hans_data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") output = trainer.predict(eval_dataset) preds = output.predictions preds = np.argmax(preds, axis=1) pair_ids = [ex.pairID for ex in eval_dataset] output_eval_file = os.path.join(training_args.output_dir, "hans_predictions.txt") 
label_list = eval_dataset.get_labels() if trainer.is_world_master(): with open(output_eval_file, "w") as writer: writer.write("pairID,gold_label\n") for pid, pred in zip(pair_ids, preds): writer.write("ex" + str(pid) + "," + label_list[int(pred)] + "\n") trainer._log(output.metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
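To make the role of `hans_data_collator` concrete, here is a tiny self-contained check (token ids and pair ids are made up) showing that the `pairID` field is dropped before a batch reaches the model, whose `forward` does not accept it:

```python
from transformers import default_data_collator

features = [
    {"input_ids": [101, 2023, 102], "attention_mask": [1, 1, 1], "label": 1, "pairID": 42},
    {"input_ids": [101, 2008, 102], "attention_mask": [1, 1, 1], "label": 0, "pairID": 43},
]

batch = default_data_collator(features)
_ = batch.pop("pairID", None)  # same step as hans_data_collator above

print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'labels']
print(batch["labels"])       # tensor([1, 0])
```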
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/adversarial/requirements.txt
transformers == 3.5.1
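The scripts in this folder were written against this pinned release, so the intended setup is simply:

```bash
pip install -r requirements.txt
```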
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/adversarial/README.md
## Adversarial evaluation of model performance

Here is an example of evaluating a model with adversarial evaluation of natural language inference on the Heuristic Analysis for NLI Systems (HANS) dataset [McCoy et al., 2019](https://arxiv.org/abs/1902.01007). The example was graciously provided by [Nafise Sadat Moosavi](https://github.com/ns-moosavi).

The HANS dataset can be downloaded from [this location](https://github.com/tommccoy1/hans).

This is an example of using run_hans.py (note that `run_hans.py` infers the model type from `--model_name_or_path`, so no separate model-type flag is needed):

```bash
export HANS_DIR=path-to-hans
export MODEL_PATH=path-to-the-model-directory-that-is-trained-on-NLI-e.g.-by-using-run_glue.py

python run_hans.py \
        --task_name hans \
        --do_eval \
        --data_dir $HANS_DIR \
        --model_name_or_path $MODEL_PATH \
        --max_seq_length 128 \
        --output_dir $MODEL_PATH
```

This will create the hans_predictions.txt file in MODEL_PATH, which can then be evaluated using hans/evaluate_heur_output.py from the HANS repository.

The results of a BERT-base model trained on MNLI with batch size 8 and random seed 42, evaluated on the HANS dataset, are as follows:

```bash
Heuristic entailed results:
lexical_overlap: 0.9702
subsequence: 0.9942
constituent: 0.9962

Heuristic non-entailed results:
lexical_overlap: 0.199
subsequence: 0.0396
constituent: 0.118
```
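The official `evaluate_heur_output.py` script computes the per-heuristic numbers above. As a quick sanity check of the prediction file itself, the following sketch (not the official evaluation script; run it from inside `$MODEL_PATH` or adjust the path) collapses the three MNLI labels into the two-way entailment / non-entailment decision that HANS actually scores:

```python
from collections import Counter

counts = Counter()
with open("hans_predictions.txt") as f:
    next(f)  # skip the "pairID,gold_label" header written by run_hans.py
    for line in f:
        pair_id, label = line.strip().split(",")
        counts["entailment" if label == "entailment" else "non-entailment"] += 1

print(counts)
```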
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/finetune.sh
# The proper usage is documented in the README; you need to specify data_dir, output_dir and model_name_or_path.
# Run ./finetune.sh --help to see all the possible options.
python finetune.py \
    --learning_rate=3e-5 \
    --fp16 \
    --gpus 1 \
    --do_train \
    --do_predict \
    --n_val 1000 \
    --val_check_interval 0.1 \
    "$@"
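For example, a summarization run could be launched as follows (the paths and model name below are placeholders, not a tested configuration; the flags are the ones defined in `lightning_base.py`):

```bash
./finetune.sh \
    --data_dir cnn_dm/ \
    --model_name_or_path facebook/bart-large \
    --output_dir bart_cnn_finetune \
    --train_batch_size 4 \
    --eval_batch_size 4
```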
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/finetune_t5.sh
# Add parent directory to python path to access lightning_base.py export PYTHONPATH="../":"${PYTHONPATH}" python finetune.py \ --data_dir=$CNN_DIR \ --learning_rate=3e-5 \ --train_batch_size=$BS \ --eval_batch_size=$BS \ --output_dir=$OUTPUT_DIR \ --max_source_length=512 \ --max_target_length=56 \ --val_check_interval=0.1 --n_val=200 \ --do_train --do_predict \ "$@"
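The script expects `$CNN_DIR`, `$BS` and `$OUTPUT_DIR` to be set and a model to be chosen on the command line. An illustrative (untested, placeholder paths) invocation:

```bash
export CNN_DIR=/path/to/cnn_dm
export BS=8
export OUTPUT_DIR=t5_cnn_finetune

./finetune_t5.sh --model_name_or_path t5-small --gpus 1
```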
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/lightning_base.py
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version logger = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def 
get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, mode): if mode == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). 
Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. 
" "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} # TODO: remove with PyTorch 1.6 since pl uses native amp if args.fp16: train_params["precision"] = 16 train_params["amp_level"] = args.fp16_opt_level if args.gpus > 1: train_params["distributed_backend"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["accelerator"] = extra_train_kwargs.get("accelerator", None) train_params["profiler"] = extra_train_kwargs.get("profiler", None) trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks, logger=logger, checkpoint_callback=checkpoint_callback, **train_params, ) if args.do_train: trainer.fit(model) return trainer
0
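The `configure_optimizers` hook in lightning_base.py above splits parameters into a weight-decay group and a no-decay group (biases and LayerNorm weights) before building AdamW and a per-step scheduler dict. A minimal standalone sketch of that pattern, using a toy model and made-up hyperparameter values in place of the real `hparams`:

from torch import nn
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR

class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = TinyModel()  # toy stand-in for self.model
no_decay = ["bias", "LayerNorm.weight"]

# Biases and LayerNorm weights are excluded from weight decay; everything else gets it.
grouped_params = [
    {
        "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = AdamW(grouped_params, lr=5e-5, eps=1e-8)

# Linear warmup then linear decay, stepped every optimizer step, mirroring the
# {"scheduler": ..., "interval": "step", "frequency": 1} dict returned above.
warmup_steps, total_steps = 100, 1000

def lr_lambda(step):
    if step < warmup_steps:
        return step / max(1, warmup_steps)
    return max(0.0, (total_steps - step) / max(1, total_steps - warmup_steps))

scheduler = {"scheduler": LambdaLR(optimizer, lr_lambda), "interval": "step", "frequency": 1}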
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/finetune_bart_tiny.sh
# Script for verifying that run_bart_sum can be invoked from its directory # Get tiny dataset with cnn_dm format (4 examples for train, val, test) wget https://cdn-datasets.huggingface.co/summarization/cnn_tiny.tgz tar -xzvf cnn_tiny.tgz rm cnn_tiny.tgz export OUTPUT_DIR_NAME=bart_utest_output export CURRENT_DIR=${PWD} export OUTPUT_DIR=${CURRENT_DIR}/${OUTPUT_DIR_NAME} # Make output directory if it doesn't exist mkdir -p $OUTPUT_DIR # Add parent directory to python path to access lightning_base.py and testing_utils.py export PYTHONPATH="../":"${PYTHONPATH}" python finetune.py \ --data_dir=cnn_tiny/ \ --model_name_or_path=sshleifer/bart-tiny-random \ --learning_rate=3e-5 \ --train_batch_size=2 \ --eval_batch_size=2 \ --output_dir=$OUTPUT_DIR \ --num_train_epochs=1 \ --gpus=0 \ --do_train "$@" rm -rf cnn_tiny rm -rf $OUTPUT_DIR
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/distillation.py
#!/usr/bin/env python import argparse import gc import os import sys from pathlib import Path from typing import List # noqa: F401 import pytorch_lightning as pl import torch from finetune import SummarizationModule, TranslationModule from finetune import main as ft_main from make_student import create_student_by_copying_alternating_layers, get_layers_to_supervise from torch import nn from transformers import AutoModelForSeq2SeqLM, MBartTokenizer, T5ForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import calculate_bleu, check_output_dir, freeze_params, label_smoothed_nll_loss, use_task_specific_params # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import generic_train # noqa class SummarizationDistiller(SummarizationModule): """Supports T5, Bart, Pegasus and other models that inherit from Bart.""" loss_names = ["loss", "ce_loss", "mlm_loss", "hid_loss_enc", "hid_loss_dec"] def __init__(self, hparams): assert Path(hparams.data_dir).exists() self.output_dir = Path(hparams.output_dir) self.output_dir.mkdir(exist_ok=True) save_dir = self.output_dir.joinpath("student") hparams.model_name_or_path = str(save_dir) # Tell lightning we are training the student teacher = AutoModelForSeq2SeqLM.from_pretrained(hparams.teacher).eval() use_task_specific_params(teacher, hparams.task) # We copy good generation parameters to student by default if hparams.student is not None: student = AutoModelForSeq2SeqLM.from_pretrained(hparams.student) use_task_specific_params(student, hparams.task) e_layer_ids, d_layer_ids = None, None else: student, e_layer_ids, d_layer_ids = create_student_by_copying_alternating_layers( teacher, e=hparams.student_encoder_layers, d=hparams.student_decoder_layers, save_path=save_dir ) if hparams.length_penalty != -1: student.config.length_penalty = hparams.length_penalty hparams.tokenizer_name = hparams.teacher # Use teacher's tokenizer super().__init__(hparams, model=student, config=student.config) assert student.config.model_type == teacher.config.model_type, ( f"teacher, student model types should be the same, got {student.config.model_type} !=" f" {teacher.config.model_type}" ) if student.config.model_type == "t5": student_encoder_layers = len(student.get_encoder().block) student_decoder_layers = len(student.get_decoder().block) teacher_encoder_layers = len(teacher.get_encoder().block) teacher_decoder_layers = len(teacher.get_decoder().block) else: student_encoder_layers = student.config.encoder_layers student_decoder_layers = student.config.decoder_layers teacher_encoder_layers = teacher.config.encoder_layers teacher_decoder_layers = teacher.config.decoder_layers self.different_base_models = not (hparams.student is None or hparams.teacher == hparams.student) self.do_calc_hidden_loss = (not self.different_base_models) and hparams.alpha_hid > 0 self.different_encoder = self.different_base_models or (student_encoder_layers != teacher_encoder_layers) # self.different_encoder determines whether we need to run the teacher encoder self.teacher = teacher freeze_params(self.teacher) if not self.different_encoder: # To save RAM, delete teacher encoder and freeze student encoder. 
try: del self.teacher.model.encoder except AttributeError: # T5 del self.teacher.encoder if e_layer_ids is None: e_layer_ids = list(range(student_encoder_layers)) if d_layer_ids is None: d_layer_ids = list(range(student_decoder_layers)) self.e_layer_ids, self.d_layer_ids = e_layer_ids, d_layer_ids # type: List[int], List[int] if self.do_calc_hidden_loss: # Intermediate supervision: Decide which layers to supervise if hparams.supervise_forward: self.e_matches = get_layers_to_supervise( n_student=len(self.e_layer_ids), n_teacher=teacher_encoder_layers ) self.d_matches = get_layers_to_supervise( n_student=len(self.d_layer_ids), n_teacher=teacher_decoder_layers ) else: # student layer should emulate hidden states of the teacher layer it was copied from self.e_matches = self.e_layer_ids self.d_matches = self.d_layer_ids else: self.e_matches = None self.d_matches = None self.ce_loss_fct = nn.KLDivLoss(reduction="batchmean") self.temperature = 2.0 self.alpha_mlm = hparams.alpha_mlm self.alpha_ce = hparams.alpha_ce self.alpha_hid = hparams.alpha_hid gc.collect() torch.cuda.empty_cache() def calc_ce_loss(self, mask, s_logits, t_logits): """Copy pasted from distillbert (transformers/examples/distillation/)""" # mask has False at padding_idx sel_mask = mask[:, :, None].expand_as(s_logits) vocab_size = s_logits.size(-1) s_logits_slct = torch.masked_select(s_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask t_logits_slct = torch.masked_select(t_logits, sel_mask) # (bs * seq_length * voc_size) modulo the 1s in mask s_logits_slct = s_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask t_logits_slct = t_logits_slct.view(-1, vocab_size) # (bs * seq_length, voc_size) modulo the 1s in mask assert t_logits_slct.size() == s_logits_slct.size() loss_ce = ( self.ce_loss_fct( nn.functional.log_softmax(s_logits_slct / self.temperature, dim=-1), nn.functional.softmax(t_logits_slct / self.temperature, dim=-1), ) * (self.temperature) ** 2 ) return loss_ce @staticmethod def add_model_specific_args(parser, root_dir): SummarizationModule.add_model_specific_args(parser, root_dir) add_distill_args(parser) return parser def _step(self, batch: dict) -> tuple: """Compute the loss for a batch""" pad_token_id = self.tokenizer.pad_token_id input_ids, src_mask, labels = batch["input_ids"], batch["attention_mask"], batch["labels"] if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(labels) else: decoder_input_ids = shift_tokens_right(labels, pad_token_id) # noinspection PyCallingNonCallable student_outputs = self( input_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, output_hidden_states=self.do_calc_hidden_loss, output_attentions=False, use_cache=False, ) lm_logits = student_outputs["logits"] # Same cross entropy vs. 
label smoothing logic as finetune.py assert lm_logits.shape[-1] == self.model.config.vocab_size if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id) student_lm_loss = loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), labels.view(-1)) else: lprobs = nn.functional.log_softmax(lm_logits, dim=-1) student_lm_loss, _ = label_smoothed_nll_loss( lprobs, labels, self.hparams.label_smoothing, ignore_index=pad_token_id ) def zero_tensor(): return torch.tensor(0.0).type_as(student_lm_loss) teacher_enc_outputs = student_outputs[ "encoder_last_hidden_state" ] # use this unless self.different_base_models hid_loss_enc, hid_loss_dec = zero_tensor(), zero_tensor() if self.different_encoder: # compute encoder hidden state loss all_teacher_encoder_outputs = self.teacher.get_encoder()( input_ids, attention_mask=src_mask, output_hidden_states=self.do_calc_hidden_loss, ) if self.different_base_models: teacher_enc_outputs = all_teacher_encoder_outputs["last_hidden_state"] elif self.do_calc_hidden_loss: hid_loss_enc = self.calc_hidden_loss( src_mask, student_outputs["encoder_hidden_states"], all_teacher_encoder_outputs["hidden_states"], self.e_matches, normalize_hidden=self.hparams.normalize_hidden, ) teacher_outputs = self.teacher( input_ids, attention_mask=src_mask, encoder_outputs=(teacher_enc_outputs,), decoder_input_ids=decoder_input_ids, output_hidden_states=self.do_calc_hidden_loss, use_cache=False, # since we are not passing labels, never let this default to True ) dec_mask = decoder_input_ids.ne(pad_token_id) loss_ce = self.calc_ce_loss(dec_mask, lm_logits, teacher_outputs["logits"]) if self.do_calc_hidden_loss: # Intermediate supervision of decoder hidden states hid_loss_dec = self.calc_hidden_loss( dec_mask, student_outputs["decoder_hidden_states"], teacher_outputs["decoder_hidden_states"], self.d_matches, normalize_hidden=self.hparams.normalize_hidden, ) blended_loss = ( self.alpha_ce * loss_ce + self.alpha_mlm * student_lm_loss + self.hparams.alpha_hid * (hid_loss_enc + hid_loss_dec) ) return blended_loss, loss_ce, student_lm_loss, hid_loss_enc, hid_loss_dec @staticmethod def calc_hidden_loss(attention_mask, hidden_states, hidden_states_T, matches, normalize_hidden): """MSE(student_hid, teacher_hid[matches]). Called "Intermediate supervision" in paper. 
Inspired by TinyBERT.""" msg = "expected list or tuple for hidden_states, got tensor of shape: " assert not isinstance(hidden_states, torch.Tensor), f"{msg}{hidden_states.shape}" assert not isinstance(hidden_states_T, torch.Tensor), f"{msg}{hidden_states_T.shape}" mask = attention_mask.to(hidden_states[0]) valid_count = mask.sum() * hidden_states[0].size(-1) student_states = torch.stack([hidden_states[i] for i in range(len(matches))]) teacher_states = torch.stack([hidden_states_T[j] for j in matches]) assert student_states.shape == teacher_states.shape, f"{student_states.shape} != {teacher_states.shape}" if normalize_hidden: student_states = nn.functional.layer_norm(student_states, student_states.shape[1:]) teacher_states = nn.functional.layer_norm(teacher_states, teacher_states.shape[1:]) mse = nn.functional.mse_loss(student_states, teacher_states, reduction="none") masked_mse = (mse * mask.unsqueeze(0).unsqueeze(-1)).sum() / valid_count return masked_mse def add_distill_args(parser): # NOTE: if --student argument was specified and the teacher and student base models # are different, the models still have to have the same tokenizer, specified by # --tokenizer_name. So, for example, you can distill from t5_large to t5_small but not # from bart to t5. This s because if the tokenizers are different, the output space # for the two models is also different and their logits are not comparable. parser.add_argument("--teacher", type=str) parser.add_argument("--alpha_ce", default=0.8, type=float) parser.add_argument("--alpha_mlm", default=0.2, type=float) parser.add_argument("--alpha_hid", default=0.0, type=float, required=False) parser.add_argument("--student", type=str, required=False) parser.add_argument("--student_decoder_layers", default=12, type=int, required=False) parser.add_argument("--student_encoder_layers", default=12, type=int, required=False) parser.add_argument("--no_teacher", action="store_true", default=False) parser.add_argument("--length_penalty", type=float, default=-1) parser.add_argument("--supervise_forward", action="store_true", default=False) parser.add_argument("--normalize_hidden", action="store_true", default=False) class TranslationDistiller(SummarizationDistiller): """Supports T5, mBART, Marian, other models that inherit from Bart.""" mode = "translation" metric_names = ["bleu"] default_val_metric = "bleu" def __init__(self, hparams, **kwargs): super().__init__(hparams, **kwargs) assert hparams.src_lang is not None assert hparams.tgt_lang is not None self.dataset_kwargs["src_lang"] = hparams.src_lang self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] def calc_generative_metrics(self, preds, target) -> dict: return calculate_bleu(preds, target) @staticmethod def add_model_specific_args(parser, root_dir): TranslationModule.add_model_specific_args(parser, root_dir) add_distill_args(parser) return parser def create_module(args): if args.no_teacher: module_cls = TranslationModule if "translation" in args.task else SummarizationModule else: # DISTILL WITH TEACHER module_cls = TranslationDistiller if "translation" in args.task else SummarizationDistiller args.setup_cls: str = module_cls.__name__ print(f"using module {args.setup_cls}") model = module_cls(args) return model def distill_main(args): Path(args.output_dir).mkdir(exist_ok=True) check_output_dir(args, expected_items=3) model = 
create_module(args) return ft_main(args, model=model) if __name__ == "__main__": parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() distill_main(args)
0
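`SummarizationDistiller.calc_ce_loss` above masks out padding positions and then applies a temperature-scaled KL divergence between the softened student and teacher distributions. A minimal sketch of that computation on random tensors; the shapes are illustrative, while the temperature of 2.0 matches the value hard-coded in `__init__`:

import torch
from torch import nn

bs, seq_len, vocab = 2, 5, 11
temperature = 2.0
s_logits = torch.randn(bs, seq_len, vocab)        # student logits
t_logits = torch.randn(bs, seq_len, vocab)        # teacher logits
mask = torch.ones(bs, seq_len, dtype=torch.bool)  # False at padding positions

# Keep only non-padded positions, then reshape back to (num_tokens, vocab_size).
sel_mask = mask[:, :, None].expand_as(s_logits)
s_sel = torch.masked_select(s_logits, sel_mask).view(-1, vocab)
t_sel = torch.masked_select(t_logits, sel_mask).view(-1, vocab)

# nn.KLDivLoss expects log-probabilities for the input (student) and probabilities
# for the target (teacher); the temperature**2 factor keeps gradient magnitudes
# comparable across temperatures.
ce_loss_fct = nn.KLDivLoss(reduction="batchmean")
loss_ce = ce_loss_fct(
    nn.functional.log_softmax(s_sel / temperature, dim=-1),
    nn.functional.softmax(t_sel / temperature, dim=-1),
) * temperature**2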
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/distil_marian_no_teacher.sh
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export WANDB_PROJECT=dmar export MAX_LEN=128 python finetune.py \ --learning_rate=3e-4 \ --do_train \ --do_predict \ --fp16 \ --val_check_interval 0.25 \ --data_dir $ENRO_DIR \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --freeze_encoder --freeze_embeds \ --train_batch_size=$BS --eval_batch_size=$BS \ --tokenizer_name $m --model_name_or_path $m \ --warmup_steps 500 --sortish_sampler --logger_name wandb \ --gpus 1 --fp16_opt_level=O1 --task translation --num_sanity_val_steps=0 \ "$@"
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/requirements.txt
tensorboard scikit-learn psutil sacrebleu rouge-score tensorflow_datasets pytorch-lightning matplotlib git-python==1.0.3 faiss-cpu streamlit elasticsearch nltk pandas datasets >= 1.1.3 fire pytest conllu sentencepiece != 0.1.92 protobuf
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export WANDB_PROJECT=dmar export MAX_LEN=128 export m=sshleifer/student_marian_en_ro_6_1 python finetune.py \ --learning_rate=3e-4 \ --do_train \ --fp16 \ --data_dir wmt_en_ro \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --freeze_encoder --freeze_embeds \ --train_batch_size=48 --eval_batch_size=64 \ --tokenizer_name $m --model_name_or_path $m --num_train_epochs=1 \ --warmup_steps 500 --logger_name wandb --gpus 1 \ --fp16_opt_level=O1 --task translation \ "$@"
0
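The `--max_tokens_per_batch` behaviour exercised by scripts like this one is implemented in utils.py by `make_dynamic_sampler`, which delegates to fairseq's `batch_by_size`. As a rough illustration of the idea only (not the fairseq algorithm itself), here is a greedy grouping of length-sorted examples under a padded-token budget, with made-up lengths:

lengths = [12, 11, 9, 9, 8, 5, 4, 3]   # source lengths, already sorted descending
max_tokens = 24

batches, current = [], []
for idx, n in enumerate(lengths):
    # Cost of a batch is its longest sequence times its size (padding included).
    longest = max([lengths[i] for i in current] + [n])
    if current and longest * (len(current) + 1) > max_tokens:
        batches.append(current)
        current = []
    current.append(idx)
if current:
    batches.append(current)

print(batches)   # [[0, 1], [2, 3], [4, 5, 6], [7]]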
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/make_student.py
import warnings from pathlib import Path from typing import List, Tuple, Union import fire from torch import nn from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel from transformers.utils import logging logger = logging.get_logger(__name__) def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None: layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy]) assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}" dest_layers.load_state_dict(layers_to_copy.state_dict()) LAYERS_TO_COPY = { # maps num layers in teacher -> num_layers in student -> which teacher layers to copy. # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP 12: { 1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher 2: [0, 6], 3: [0, 6, 11], 4: [0, 4, 8, 11], 6: [0, 2, 4, 7, 9, 11], 9: [0, 1, 2, 4, 5, 7, 9, 10, 11], 12: list(range(12)), }, 16: { # maps num layers in student -> which teacher layers to copy 1: [0], 2: [0, 15], 3: [0, 8, 15], 4: [0, 5, 10, 15], 6: [0, 3, 6, 9, 12, 15], 8: [0, 2, 4, 6, 8, 10, 12, 15], 9: [0, 1, 3, 5, 7, 9, 11, 13, 15], 12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15], 16: list(range(16)), }, 6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))}, } LAYERS_TO_SUPERVISE = { # maps num layers in student -> which teacher layers to copy. 6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]}, 12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]}, 16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]}, } def pick_layers_to_copy(n_student, n_teacher): try: val = LAYERS_TO_COPY[n_teacher][n_student] return val except KeyError: if n_student != n_teacher: warnings.warn( f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first" f" {n_student}" ) return list(range(n_student)) def get_layers_to_supervise(n_student, n_teacher) -> List[int]: """Used or the --supervise_forward kwarg""" if n_student > n_teacher: raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}") elif n_teacher == n_student: return list(range(n_teacher)) elif n_student == 1: return [n_teacher - 1] else: return LAYERS_TO_SUPERVISE[n_teacher][n_student] def create_student_by_copying_alternating_layers( teacher: Union[str, PreTrainedModel], save_path: Union[str, Path] = "student", e: Union[int, None] = None, d: Union[int, None] = None, copy_first_teacher_layers=False, e_layers_to_copy=None, d_layers_to_copy=None, **extra_config_kwargs, ) -> Tuple[PreTrainedModel, List[int], List[int]]: """Make a student by copying alternating layers from a teacher, save it to save_path. Args: teacher: str or PreTrainedModel if str, this will call AutoModelForSeq2SeqLM.from_pretrained(teacher) before copying layers save_path: where to save the student, defaults to student directory. e: how many Encoder layers should the student have, default is fully copy of teacher d: how many Decoder layers should the student have, default is fully copy of teacher copy_first_teacher_layers: [bool] dont copy alternating layers, just the first e/d. **extra_config_kwargs: extra kwargs to pass to the student, by default the teacher config is used. Returns: student: new, smaller model. 
(Also saves it to save_path) e_layers_to_copy: list of which teacher encoder layers were used d_layers_to_copy: list of which teacher decoder layers were used """ _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher." assert (e is not None) or (d is not None), _msg if isinstance(teacher, str): AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path) # purely for convenience teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval() else: assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}" init_kwargs = teacher.config.to_diff_dict() try: teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers if e is None: e = teacher_e if d is None: d = teacher_d init_kwargs.update({"encoder_layers": e, "decoder_layers": d}) except AttributeError: # T5 if hasattr(teacher.config, "num_encoder_layers"): teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers else: teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers if e is None: e = teacher_e if d is None: d = teacher_d if hasattr(teacher.config, "num_encoder_layers"): init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d}) else: init_kwargs.update({"num_layers": e, "num_decoder_layers": d}) # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs init_kwargs.update(extra_config_kwargs) # Copy weights student_cfg = teacher.config_class(**init_kwargs) student = AutoModelForSeq2SeqLM.from_config(student_cfg) # Start by copying the full teacher state dict this will copy the first N teacher layers to the student. info = student.load_state_dict(teacher.state_dict(), strict=False) assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys. if copy_first_teacher_layers: # Our copying is done. We just log and save e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d)) logger.info( f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to" f" {save_path}" ) student.save_pretrained(save_path) return student, e_layers_to_copy, d_layers_to_copy # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer. if e_layers_to_copy is None: e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e) if d_layers_to_copy is None: d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d) try: if hasattr( teacher, "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy) copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy) else: copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy) copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy) except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy) copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy) logger.info( f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. 
Saving them to {save_path}" ) student.config.init_metadata = { "teacher_type": teacher.config.model_type, "copied_encoder_layers": e_layers_to_copy, "copied_decoder_layers": d_layers_to_copy, } student.save_pretrained(save_path) # Save information about copying for easier reproducibility return student, e_layers_to_copy, d_layers_to_copy if __name__ == "__main__": fire.Fire(create_student_by_copying_alternating_layers)
0
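make_student.py decides which teacher layers survive via the `LAYERS_TO_COPY` table, then `copy_layers` loads the selected teacher layers into the student's shorter `nn.ModuleList`. A toy demonstration of that mechanism, with plain `nn.Linear` layers standing in for transformer blocks:

from torch import nn

def copy_layers(src_layers, dest_layers, layers_to_copy):
    # Same logic as make_student.copy_layers: select the teacher layers, then load_state_dict.
    picked = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(picked), f"{len(dest_layers)} != {len(picked)}"
    dest_layers.load_state_dict(picked.state_dict())

teacher_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(12)])
student_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])

# For a 12-layer teacher and a 3-layer student, LAYERS_TO_COPY picks layers [0, 6, 11].
copy_layers(teacher_layers, student_layers, [0, 6, 11])
assert student_layers[2].weight.equal(teacher_layers[11].weight)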
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/train_mbart_cc25_enro.sh
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" python finetune.py \ --learning_rate=3e-5 \ --fp16 \ --do_train \ --val_check_interval=0.25 \ --adam_eps 1e-06 \ --num_train_epochs 6 --src_lang en_XX --tgt_lang ro_RO \ --data_dir $ENRO_DIR \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --train_batch_size=$BS --eval_batch_size=$BS \ --task translation \ --warmup_steps 500 \ --freeze_embeds \ --model_name_or_path=facebook/mbart-large-cc25 \ "$@"
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/utils.py
import itertools import json import linecache import math import os import pickle import socket from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Tuple, Union import git import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from sacrebleu import corpus_bleu from sentence_splitter import add_newline_to_end_of_each_sentence from torch import nn from torch.utils.data import Dataset, Sampler from transformers import BartTokenizer, EvalPrediction, PreTrainedTokenizer, T5Tokenizer from transformers.file_utils import cached_property from transformers.models.bart.modeling_bart import shift_tokens_right try: from fairseq.data.data_utils import batch_by_size FAIRSEQ_AVAILABLE = True except (ImportError, ModuleNotFoundError): FAIRSEQ_AVAILABLE = False def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100): """From fairseq""" if target.dim() == lprobs.dim() - 1: target = target.unsqueeze(-1) nll_loss = -lprobs.gather(dim=-1, index=target) smooth_loss = -lprobs.sum(dim=-1, keepdim=True) if ignore_index is not None: pad_mask = target.eq(ignore_index) nll_loss.masked_fill_(pad_mask, 0.0) smooth_loss.masked_fill_(pad_mask, 0.0) else: nll_loss = nll_loss.squeeze(-1) smooth_loss = smooth_loss.squeeze(-1) nll_loss = nll_loss.sum() # mean()? Scared to break other math. smooth_loss = smooth_loss.sum() eps_i = epsilon / lprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss, nll_loss def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict: """Uses sacrebleu's corpus_bleu implementation.""" return {"bleu": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)} def build_compute_metrics_fn(task_name: str, tokenizer: PreTrainedTokenizer) -> Callable[[EvalPrediction], Dict]: def non_pad_len(tokens: np.ndarray) -> int: return np.count_nonzero(tokens != tokenizer.pad_token_id) def decode_pred(pred: EvalPrediction) -> Tuple[List[str], List[str]]: pred_str = tokenizer.batch_decode(pred.predictions, skip_special_tokens=True) label_str = tokenizer.batch_decode(pred.label_ids, skip_special_tokens=True) pred_str = lmap(str.strip, pred_str) label_str = lmap(str.strip, label_str) return pred_str, label_str def summarization_metrics(pred: EvalPrediction) -> Dict: pred_str, label_str = decode_pred(pred) rouge: Dict = calculate_rouge(pred_str, label_str) summ_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) rouge.update({"gen_len": summ_len}) return rouge def translation_metrics(pred: EvalPrediction) -> Dict: pred_str, label_str = decode_pred(pred) bleu: Dict = calculate_bleu(pred_str, label_str) gen_len = np.round(np.mean(lmap(non_pad_len, pred.predictions)), 1) bleu.update({"gen_len": gen_len}) return bleu compute_metrics_fn = summarization_metrics if "summarization" in task_name else translation_metrics return compute_metrics_fn def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class AbstractSeq2SeqDataset(Dataset): def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, prefix="", **dataset_kwargs, ): super().__init__() 
self.src_file = Path(data_dir).joinpath(type_path + ".source") self.tgt_file = Path(data_dir).joinpath(type_path + ".target") self.len_file = Path(data_dir).joinpath(type_path + ".len") if os.path.exists(self.len_file): self.src_lens = pickle_load(self.len_file) self.used_char_len = False else: self.src_lens = self.get_char_lens(self.src_file) self.used_char_len = True self.max_source_length = max_source_length self.max_target_length = max_target_length assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" self.tokenizer = tokenizer self.prefix = prefix if prefix is not None else "" if n_obs is not None: self.src_lens = self.src_lens[:n_obs] self.pad_token_id = self.tokenizer.pad_token_id self.dataset_kwargs = dataset_kwargs dataset_kwargs.update({"add_prefix_space": True} if isinstance(self.tokenizer, BartTokenizer) else {}) def __len__(self): return len(self.src_lens) @staticmethod def get_char_lens(data_file): return [len(x) for x in Path(data_file).open().readlines()] @cached_property def tgt_lens(self): """Length in characters of target documents""" return self.get_char_lens(self.tgt_file) def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs): if distributed: return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs) else: return SortishSampler(self.src_lens, batch_size, shuffle=shuffle) def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs): assert FAIRSEQ_AVAILABLE, "Dynamic batch size requires `pip install fairseq`" assert not self.used_char_len, "You must call python make_len_file.py before calling make_dynamic_sampler" sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False)) def num_tokens_in_example(i): return min(self.src_lens[i], self.max_target_length) # call fairseq cython function batch_sampler: List[List[int]] = batch_by_size( sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64, ) shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))] # move the largest batch to the front to OOM quickly (uses an approximation for padding) approximate_toks_per_batch = [max(self.src_lens[i] for i in batch) * len(batch) for batch in shuffled_batches] largest_batch_idx = np.argmax(approximate_toks_per_batch) shuffled_batches[0], shuffled_batches[largest_batch_idx] = ( shuffled_batches[largest_batch_idx], shuffled_batches[0], ) return shuffled_batches def __getitem__(self, item): raise NotImplementedError("You must implement this") def collate_fn(self, batch): raise NotImplementedError("You must implement this") class LegacySeq2SeqDataset(AbstractSeq2SeqDataset): def __getitem__(self, index) -> Dict[str, torch.Tensor]: """Call tokenizer on src and tgt_lines""" index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" source_inputs = self.encode_line(self.tokenizer, source_line, self.max_source_length) target_inputs = self.encode_line(self.tokenizer, tgt_line, self.max_target_length) source_ids = source_inputs["input_ids"].squeeze() target_ids = target_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "labels": target_ids, } def encode_line(self, tokenizer, 
line, max_length, pad_to_max_length=True, return_tensors="pt"): """Only used by LegacyDataset""" return tokenizer( [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, **self.dataset_kwargs, ) def collate_fn(self, batch) -> Dict[str, torch.Tensor]: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) target_ids = torch.stack([x["labels"] for x in batch]) pad_token_id = self.pad_token_id y = trim_batch(target_ids, pad_token_id) source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks) batch = { "input_ids": source_ids, "attention_mask": source_mask, "labels": y, } return batch class Seq2SeqDataset(AbstractSeq2SeqDataset): """A dataset that calls prepare_seq2seq_batch.""" def __getitem__(self, index) -> Dict[str, str]: index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index - 1} def collate_fn(self, batch) -> Dict[str, torch.Tensor]: """Call prepare_seq2seq_batch.""" batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch( [x["src_texts"] for x in batch], tgt_texts=[x["tgt_texts"] for x in batch], max_length=self.max_source_length, max_target_length=self.max_target_length, return_tensors="pt", **self.dataset_kwargs, ).data batch_encoding["ids"] = torch.tensor([x["id"] for x in batch]) return batch_encoding class Seq2SeqDataCollator: def __init__(self, tokenizer, data_args, tpu_num_cores=None): self.tokenizer = tokenizer self.pad_token_id = tokenizer.pad_token_id assert ( self.pad_token_id is not None ), f"pad_token_id is not defined for ({self.tokenizer.__class__.__name__}), it must be defined." 
self.data_args = data_args self.tpu_num_cores = tpu_num_cores self.dataset_kwargs = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {} if data_args.src_lang is not None: self.dataset_kwargs["src_lang"] = data_args.src_lang if data_args.tgt_lang is not None: self.dataset_kwargs["tgt_lang"] = data_args.tgt_lang def __call__(self, batch) -> Dict[str, torch.Tensor]: if hasattr(self.tokenizer, "prepare_seq2seq_batch"): batch = self._encode(batch) input_ids, attention_mask, labels = ( batch["input_ids"], batch["attention_mask"], batch["labels"], ) else: input_ids = torch.stack([x["input_ids"] for x in batch]) attention_mask = torch.stack([x["attention_mask"] for x in batch]) labels = torch.stack([x["labels"] for x in batch]) labels = trim_batch(labels, self.pad_token_id) input_ids, attention_mask = trim_batch(input_ids, self.pad_token_id, attention_mask=attention_mask) if isinstance(self.tokenizer, T5Tokenizer): decoder_input_ids = self._shift_right_t5(labels) else: decoder_input_ids = shift_tokens_right(labels, self.pad_token_id) batch = { "input_ids": input_ids, "attention_mask": attention_mask, "decoder_input_ids": decoder_input_ids, "labels": labels, } return batch def _shift_right_t5(self, input_ids): # shift inputs to the right shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[..., 1:] = input_ids[..., :-1].clone() shifted_input_ids[..., 0] = self.pad_token_id return shifted_input_ids def _encode(self, batch) -> Dict[str, torch.Tensor]: batch_encoding = self.tokenizer.prepare_seq2seq_batch( [x["src_texts"] for x in batch], tgt_texts=[x["tgt_texts"] for x in batch], max_length=self.data_args.max_source_length, max_target_length=self.data_args.max_target_length, padding="max_length" if self.tpu_num_cores is not None else "longest", # TPU hack return_tensors="pt", **self.dataset_kwargs, ) return batch_encoding.data class SortishSampler(Sampler): "Go through the text data by order of src length with a bit of randomness. From fastai repo." def __init__(self, data, batch_size, shuffle=True): self.data, self.bs, self.shuffle = data, batch_size, shuffle def __len__(self) -> int: return len(self.data) def __iter__(self): return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle)) def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array: "Go through the text data by order of src length with a bit of randomness. From fastai repo." if not shuffle: return np.argsort(np.array(data) * -1) def key_fn(i): return data[i] idxs = np.random.permutation(len(data)) sz = bs * 50 ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)] sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx]) sz = bs ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)] max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key, ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first. 
sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=int) sort_idx = np.concatenate((ck_idx[0], sort_idx)) return sort_idx class DistributedSortishSampler(Sampler): """Copied from torch DistributedSampler""" def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 if add_extra_examples: self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas else: self.total_size = len(dataset) self.num_samples = len(self.available_indices) self.batch_size = batch_size self.add_extra_examples = add_extra_examples self.shuffle = shuffle def __iter__(self) -> Iterable: g = torch.Generator() g.manual_seed(self.epoch) sortish_data = [self.dataset.src_lens[i] for i in self.available_indices] sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle) indices = [self.available_indices[i] for i in sortish_indices] assert len(indices) == self.num_samples return iter(indices) @cached_property def available_indices(self) -> np.array: indices = list(range(len(self.dataset))) # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size # subsample available_indices = indices[self.rank : self.total_size : self.num_replicas] return available_indices def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch logger = getLogger(__name__) def use_task_specific_params(model, task): """Update config with summarization specific params.""" task_specific_params = model.config.task_specific_params if task_specific_params is not None: pars = task_specific_params.get(task, {}) logger.info(f"using task specific params for {task}: {pars}") model.config.update(pars) def pickle_load(path): """pickle.load(path)""" with open(path, "rb") as f: return pickle.load(f) def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) def flatten_list(summary_ids: List[List]): return list(itertools.chain.from_iterable(summary_ids)) def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json")) def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, **json_dump_kwargs) def load_json(path): with open(path) as f: return json.load(f) def get_git_info(): try: repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } return repo_infos except TypeError: return { "repo_id": None, "repo_sha": None, "repo_branch": None, "hostname": None, } ROUGE_KEYS = ["rouge1", "rouge2", "rougeL", "rougeLsum"] def extract_rouge_mid_statistics(dct): new_dict = {} for k1, v1 in dct.items(): mid = v1.mid new_dict[k1] = {stat: round(getattr(mid, stat), 4) for stat in ["precision", "recall", 
"fmeasure"]} return new_dict def calculate_rouge( pred_lns: List[str], tgt_lns: List[str], use_stemmer=True, rouge_keys=ROUGE_KEYS, return_precision_and_recall=False, bootstrap_aggregation=True, newline_sep=True, ) -> Dict: """Calculate rouge using rouge_scorer package. Args: pred_lns: list of summaries generated by model tgt_lns: list of groundtruth summaries (e.g. contents of val.target) use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes to improve matching. rouge_keys: which metrics to compute, defaults to rouge1, rouge2, rougeL, rougeLsum return_precision_and_recall: (False) whether to also return precision and recall. bootstrap_aggregation: whether to do the typical bootstrap resampling of scores. Defaults to True, if False this function returns a collections.defaultdict[metric: list of values for each observation for each subscore]`` newline_sep:(default=True) whether to add newline between sentences. This is essential for calculation rougeL on multi sentence summaries (CNN/DM dataset). Returns: Dict[score: value] if aggregate else defaultdict(list) keyed by rouge_keys """ scorer = rouge_scorer.RougeScorer(rouge_keys, use_stemmer=use_stemmer) aggregator = scoring.BootstrapAggregator() for pred, tgt in zip(tgt_lns, pred_lns): # rougeLsum expects "\n" separated sentences within a summary if newline_sep: pred = add_newline_to_end_of_each_sentence(pred) tgt = add_newline_to_end_of_each_sentence(tgt) scores = scorer.score(pred, tgt) aggregator.add_scores(scores) if bootstrap_aggregation: result = aggregator.aggregate() if return_precision_and_recall: return extract_rouge_mid_statistics(result) # here we return dict else: return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} else: return aggregator._scores # here we return defaultdict(list) # Utilities for freezing parameters and checking whether they are frozen def freeze_params(model: nn.Module): """Set requires_grad=False for each of model.parameters()""" for par in model.parameters(): par.requires_grad = False def freeze_embeds(model): """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.""" model_type = model.config.model_type if model_type == "t5": freeze_params(model.shared) for d in [model.encoder, model.decoder]: freeze_params(d.embed_tokens) elif model_type == "fsmt": for d in [model.model.encoder, model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens) else: freeze_params(model.model.shared) for d in [model.model.encoder, model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens) def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def any_requires_grad(model: nn.Module) -> bool: return any(grad_status(model)) def assert_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) n_require_grad = sum(lmap(int, model_grads)) npars = len(model_grads) assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad" def assert_not_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) npars = len(model_grads) assert any(model_grads), f"none of {npars} weights require grad" def parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]: """ Parse an argv list of unspecified command line args to a dict. Assumes all values are either numeric or boolean in the form of true/false. 
""" result = {} assert len(unparsed_args) % 2 == 0, f"got odd number of unparsed args: {unparsed_args}" num_pairs = len(unparsed_args) // 2 for pair_num in range(num_pairs): i = 2 * pair_num assert unparsed_args[i].startswith("--") if unparsed_args[i + 1].lower() == "true": value = True elif unparsed_args[i + 1].lower() == "false": value = False else: try: value = int(unparsed_args[i + 1]) except ValueError: value = float(unparsed_args[i + 1]) # this can raise another informative ValueError result[unparsed_args[i][2:]] = value return result def write_txt_file(ordered_tgt, path): f = Path(path).open("w") for ln in ordered_tgt: f.write(ln + "\n") f.flush() def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] def check_output_dir(args, expected_items=0): """ Checks whether to bail out if output_dir already exists and has more than expected_items in it `args`: needs to have the following attributes of `args`: - output_dir - do_train - overwrite_output_dir `expected_items`: normally 0 (default) - i.e. empty dir, but in some cases a few files are expected (e.g. recovery from OOM) """ if ( os.path.exists(args.output_dir) and len(os.listdir(args.output_dir)) > expected_items and args.do_train and not args.overwrite_output_dir ): raise ValueError( f"Output directory ({args.output_dir}) already exists and " f"has {len(os.listdir(args.output_dir))} items in it (expected {expected_items} items). " "Use --overwrite_output_dir to overcome." )
0
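Among the helpers above, `trim_batch` is the one every collate function relies on: it drops columns that contain nothing but padding, so batches are only as wide as their longest real sequence. A tiny worked example with made-up token ids and `pad_token_id=0`:

import torch

def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id (as in utils.py)."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    return input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]

pad = 0
batch = torch.tensor([[5, 7, pad, pad],
                      [6, pad, pad, pad]])
print(trim_batch(batch, pad))
# tensor([[5, 7],
#         [6, 0]])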
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py
#!/usr/bin/env python import os from pathlib import Path from typing import Dict, List import fire import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.utils.logging import get_logger logger = get_logger(__name__) def remove_prefix(text: str, prefix: str): if text.startswith(prefix): return text[len(prefix) :] return text # or whatever def sanitize(sd): return {remove_prefix(k, "model."): v for k, v in sd.items()} def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]): new_sd = {} for k in state_dicts[0].keys(): tensors = [sd[k] for sd in state_dicts] new_t = sum(tensors) / len(tensors) assert isinstance(new_t, torch.Tensor) new_sd[k] = new_t return new_sd def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint save_path (:obj:`str`): Directory to save the new model """ hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt")) assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory" if len(ckpt_files) > 1: logger.info(f"averaging the weights of {ckpt_files}") state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f"missing keys: {missing}" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant if __name__ == "__main__": fire.Fire(convert_pl_to_hf)
0
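When `pl_ckpt_path` is a directory, the converter above averages every `.ckpt` it finds after stripping the Lightning `model.` prefix. A toy illustration of the key-wise averaging step, using two made-up state dicts:

import torch

def average_state_dicts(state_dicts):
    # Element-wise mean of each tensor across checkpoints, key by key.
    return {k: sum(sd[k] for sd in state_dicts) / len(state_dicts) for k in state_dicts[0]}

sd_a = {"lin.weight": torch.ones(2, 2), "lin.bias": torch.zeros(2)}
sd_b = {"lin.weight": torch.full((2, 2), 3.0), "lin.bias": torch.zeros(2)}
averaged = average_state_dicts([sd_a, sd_b])
assert torch.allclose(averaged["lin.weight"], torch.full((2, 2), 2.0))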
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/callbacks.py
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils import save_json def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params logger = logging.getLogger(__name__) class Seq2SeqLoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(lrs) @rank_zero_only def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content) @rank_zero_only def on_train_start(self, trainer, pl_module): try: npars = pl_module.model.model.num_parameters() except AttributeError: npars = pl_module.model.num_parameters() n_trainable_pars = count_trainable_parameters(pl_module) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) @rank_zero_only def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): save_json(pl_module.metrics, pl_module.metrics_save_path) return self._write_logs(trainer, pl_module, "test") @rank_zero_only def on_validation_end(self, trainer: pl.Trainer, pl_module): save_json(pl_module.metrics, pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid") def get_checkpoint_callback(output_dir, metric, save_top_k=1, lower_is_better=False): """Saves the best model by validation ROUGE2 score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "loss": exp = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2, bleu and loss, got {metric}, You can make your own by adding to" " this function." 
) checkpoint_callback = ModelCheckpoint( dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="min" if "loss" in metric else "max", save_top_k=save_top_k, ) return checkpoint_callback def get_early_stopping_callback(metric, patience): return EarlyStopping( monitor=f"val_{metric}", # does this need avg? mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
0
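finetune.py (below) imports `get_checkpoint_callback` and `get_early_stopping_callback` from this file and hands the results to `generic_train` in lightning_base.py. A short usage sketch, assuming this directory is on the Python path; the output directory and patience value are placeholders:

from callbacks import get_checkpoint_callback, get_early_stopping_callback

output_dir = "output/my_run"   # placeholder
metric = "rouge2"              # one of: rouge2, bleu, loss

checkpoint_cb = get_checkpoint_callback(output_dir, metric, save_top_k=1)
early_stop_cb = get_early_stopping_callback(metric, patience=3)

# Both are then passed to generic_train(model, args,
#     early_stopping_callback=early_stop_cb, checkpoint_callback=checkpoint_cb)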
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/finetune.py
#!/usr/bin/env python import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from torch import nn from torch.utils.data import DataLoader from transformers import MBartTokenizer, T5ForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeq2SeqDataset, Seq2SeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa logger = logging.getLogger(__name__) class SummarizationModule(BaseTransformer): mode = "summarization" loss_names = ["loss"] metric_names = ROUGE_KEYS default_val_metric = "rouge2" def __init__(self, hparams, **kwargs): if hparams.sortish_sampler and hparams.gpus > 1: hparams.replace_sampler_ddp = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training") if hparams.sortish_sampler: raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously") super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) use_task_specific_params(self.model, "summarization") save_git_info(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.model_type = self.config.model_type self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size self.dataset_kwargs: dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": self.model.config.prefix or "", } n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: freeze_embeds(self.model) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder()) assert_all_frozen(self.model.get_encoder()) self.hparams.git_sha = get_git_info()["repo_sha"] self.num_workers = hparams.num_workers self.decoder_start_token_id = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] self.model.config.decoder_start_token_id = self.decoder_start_token_id self.dataset_class = ( Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset ) 
self.already_saved_batch = False self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: self.eval_max_length = self.hparams.eval_max_gen_length else: self.eval_max_length = self.model.config.max_length self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]: """A debugging utility""" readable_batch = { k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items() } save_json(readable_batch, Path(self.output_dir) / "text_batch.json") save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json") self.already_saved_batch = True return readable_batch def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: pad_token_id = self.tokenizer.pad_token_id src_ids, src_mask = batch["input_ids"], batch["attention_mask"] tgt_ids = batch["labels"] if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(tgt_ids) else: decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero batch["decoder_input_ids"] = decoder_input_ids self.save_readable_batch(batch) outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False) lm_logits = outputs["logits"] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id) assert lm_logits.shape[-1] == self.vocab_size loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1)) else: lprobs = nn.functional.log_softmax(lm_logits, dim=-1) loss, nll_loss = label_smoothed_nll_loss( lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id ) return (loss,) @property def pad(self) -> int: return self.tokenizer.pad_token_id def training_step(self, batch, batch_idx) -> Dict: loss_tensors = self._step(batch) logs = dict(zip(self.loss_names, loss_tensors)) # tokens per batch logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum() logs["bs"] = batch["input_ids"].shape[0] logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum() logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def validation_step(self, batch, batch_idx) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] generative_metrics = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] } metric_val = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss) generative_metrics.update({k: v.item() for k, v in losses.items()}) 
losses.update(generative_metrics) all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} all_metrics["step_count"] = self.step_count self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path preds = flatten_list([x["preds"] for x in outputs]) return { "log": all_metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metric_tensor, } def calc_generative_metrics(self, preds, target) -> Dict: return calculate_rouge(preds, target) def _generative_step(self, batch: dict) -> dict: t0 = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') generated_ids = self.model.generate( batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, ) gen_time = (time.time() - t0) / batch["input_ids"].shape[0] preds: List[str] = self.ids_to_clean_text(generated_ids) target: List[str] = self.ids_to_clean_text(batch["labels"]) loss_tensors = self._step(batch) base_metrics = dict(zip(self.loss_names, loss_tensors)) rouge: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge) return base_metrics def test_step(self, batch, batch_idx): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> Seq2SeqDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = self.dataset_class( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1) return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": batch_sampler = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 ) return DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, # shuffle=False, num_workers=self.num_workers, # batch_size=None, ) else: return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, ) def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=1024, type=int, help=( "The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", default=56, type=int, help=( "The maximum total target sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--val_max_target_length", default=142, # these defaults are optimized for CNNDM. For xsum, see README.md. type=int, help=( "The maximum total target sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--test_max_target_length", default=142, type=int, help=( "The maximum total target sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument("--freeze_encoder", action="store_true") parser.add_argument("--freeze_embeds", action="store_true") parser.add_argument("--sortish_sampler", action="store_true", default=False) parser.add_argument("--overwrite_output_dir", action="store_true", default=False) parser.add_argument("--max_tokens_per_batch", type=int, default=None) parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument( "--task", type=str, default="summarization", required=False, help="Task to run, e.g. summarization or translation." ) parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument("--src_lang", type=str, default="", required=False) parser.add_argument("--tgt_lang", type=str, default="", required=False) parser.add_argument("--eval_beams", type=int, default=None, required=False) parser.add_argument( "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] ) parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens") parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save") parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help=( "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" " val_check_interval will affect it."
), ) return parser class TranslationModule(SummarizationModule): mode = "translation" loss_names = ["loss"] metric_names = ["bleu"] default_val_metric = "bleu" def __init__(self, hparams, **kwargs): super().__init__(hparams, **kwargs) self.dataset_kwargs["src_lang"] = hparams.src_lang self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang def calc_generative_metrics(self, preds, target) -> dict: return calculate_bleu(preds, target) def main(args, model=None) -> SummarizationModule: Path(args.output_dir).mkdir(exist_ok=True) check_output_dir(args, expected_items=3) if model is None: if "summarization" in args.task: model: SummarizationModule = SummarizationModule(args) else: model: SummarizationModule = TranslationModule(args) dataset = Path(args.data_dir).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): logger = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger project = os.environ.get("WANDB_PROJECT", dataset) logger = WandbLogger(name=model.output_dir.name, project=project) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") if args.early_stopping_patience >= 0: es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience) else: es_callback = False lower_is_better = args.val_metric == "loss" trainer: pl.Trainer = generic_train( model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback( args.output_dir, model.val_metric, args.save_top_k, lower_is_better ), early_stopping_callback=es_callback, logger=logger, ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") if not args.do_predict: return model model.hparams.test_checkpoint = "" checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True)) if checkpoints: model.hparams.test_checkpoint = checkpoints[-1] trainer.resume_from_checkpoint = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() main(args)
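As a quick orientation for `finetune.py` above: the project's own tests never launch it from the shell; they build the parser from `pl.Trainer.add_argparse_args` plus `SummarizationModule.add_model_specific_args` and then call `main(args)` directly. The sketch below follows that pattern. It is untested here; the tiny checkpoint, the placeholder `--data_dir` (assumed to hold `{train,val,test}.source` / `.target` files, as the test fixtures do), and the output path are illustrative assumptions, not values the script requires.

```python
# Minimal sketch (untested): drive finetune.py programmatically, the way the repo's tests do.
# Assumes it runs from examples/research_projects/seq2seq-distillation and that --data_dir
# contains {train,val,test}.source / .target files.
import argparse
import os

import pytorch_lightning as pl

from finetune import SummarizationModule, main

parser = argparse.ArgumentParser()
parser = pl.Trainer.add_argparse_args(parser)
parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())

args = parser.parse_args(
    [
        "--data_dir=path/to/cnn_dm",                        # placeholder dataset directory
        "--model_name_or_path=sshleifer/bart-tiny-random",  # tiny checkpoint used by the tests
        "--output_dir=/tmp/finetune_sketch",
        "--do_train",
        "--train_batch_size=2",
        "--eval_batch_size=2",
        "--max_epochs=1",
        "--gpus=0",
    ]
)
model = main(args)  # returns the trained SummarizationModule
```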
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/distil_marian_enro_teacher.sh
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export WANDB_PROJECT=dmar # export MAX_LEN=128 python distillation.py \ --learning_rate=3e-4 \ --do_train \ --fp16 \ --val_check_interval 0.25 \ --teacher Helsinki-NLP/opus-mt-en-ro \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --student_decoder_layers 3 --student_encoder_layers 6 \ --freeze_encoder --freeze_embeds \ --model_name_or_path IGNORED \ --alpha_hid=3. \ --train_batch_size=$BS --eval_batch_size=$BS \ --tokenizer_name Helsinki-NLP/opus-mt-en-ro \ --warmup_steps 500 --logger_name wandb \ --fp16_opt_level O1 --task translation --normalize_hidden --num_sanity_val_steps=0 \ "$@"
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/_test_make_student.py
import tempfile import unittest from make_student import create_student_by_copying_alternating_layers from transformers import AutoConfig from transformers.file_utils import cached_property from transformers.testing_utils import require_torch TINY_BART = "sshleifer/bart-tiny-random" TINY_T5 = "patrickvonplaten/t5-tiny-random" @require_torch class MakeStudentTester(unittest.TestCase): @cached_property def teacher_config(self): return AutoConfig.from_pretrained(TINY_BART) def test_valid_t5(self): student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1) self.assertEqual(student.config.num_hidden_layers, 1) def test_asymmetric_t5(self): student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None) def test_same_decoder_small_encoder(self): student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None) self.assertEqual(student.config.encoder_layers, 1) self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers) def test_small_enc_small_dec(self): student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1) self.assertEqual(student.config.encoder_layers, 1) self.assertEqual(student.config.decoder_layers, 1) def test_raises_assert(self): with self.assertRaises(AssertionError): create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
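For readers unfamiliar with `make_student`, the helper exercised above copies alternating layers of a teacher into a smaller student. A hedged sketch of calling it outside the test is below; the output directory name is an arbitrary placeholder, the second positional argument is assumed to be the save path (the test passes a temp dir there), and only the first element of the returned tuple is used, exactly as in the test.

```python
# Sketch: build a 1-encoder / 1-decoder student from a tiny teacher.
# Run from examples/research_projects/seq2seq-distillation; the output dir is a placeholder.
from make_student import create_student_by_copying_alternating_layers

student, *_ = create_student_by_copying_alternating_layers(
    "sshleifer/bart-tiny-random",  # teacher checkpoint (same tiny model as the test)
    "student_bart_1_1",            # assumed save path for the student checkpoint
    e=1,                           # encoder layers to keep
    d=1,                           # decoder layers to keep
)
print(student.config.encoder_layers, student.config.decoder_layers)  # expected: 1 1
```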
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/finetune_pegasus_xsum.sh
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" # From appendix C of paper https://arxiv.org/abs/1912.08777 # Set --gradient_accumulation_steps so that effective batch size is 256 (2*128, 4*64, 8*32, 16*16) python finetune.py \ --learning_rate=1e-4 \ --do_train \ --do_predict \ --n_val 1000 \ --val_check_interval 0.25 \ --max_source_length 512 --max_target_length 56 \ --freeze_embeds --label_smoothing 0.1 --adafactor --task summarization_xsum \ "$@"
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples_multi_gpu.py
# as due to their complexity multi-gpu tests could impact other tests, and to aid debug we have those in a separate module. import os import sys from pathlib import Path import torch from transformers.testing_utils import TestCasePlus, execute_subprocess_async, require_torch_multi_gpu from utils import load_json CUDA_AVAILABLE = torch.cuda.is_available() ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] CHEAP_ARGS = { "max_tokens_per_batch": None, "supervise_forward": True, "normalize_hidden": True, "label_smoothing": 0.2, "eval_max_gen_length": None, "eval_beams": 1, "val_metric": "loss", "save_top_k": 1, "adafactor": True, "early_stopping_patience": 2, "logger_name": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "accumulate_grad_batches": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "lr_scheduler": "linear", "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "max_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "freeze_encoder": False, "auto_scale_batch_size": False, "overwrite_output_dir": False, "student": None, } def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistillerMultiGPU(TestCasePlus): @classmethod def setUpClass(cls): return cls @require_torch_multi_gpu def test_multi_gpu(self): updates = { "no_teacher": True, "freeze_encoder": True, "gpus": 2, "overwrite_output_dir": True, "sortish_sampler": True, } self._test_distiller_cli_fork(updates, check_contents=False) def _test_distiller_cli_fork(self, updates, check_contents=True): default_updates = { "label_smoothing": 0.0, "early_stopping_patience": -1, "train_batch_size": 1, "eval_batch_size": 2, "max_epochs": 2, "alpha_mlm": 0.2, "alpha_ce": 0.8, "do_predict": True, "model_name_or_path": "sshleifer/tinier_bart", "teacher": CHEAP_ARGS["model_name_or_path"], "val_check_interval": 0.5, } default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) def convert(k, v): if k in ["tgt_suffix", "server_ip", "server_port", "out", "n_tpu_cores"]: return "" if v is False or v is None: return "" if v is True: # or 
len(str(v))==0: return f"--{k}" return f"--{k}={v}" cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)] cmd = [sys.executable, f"{self.test_file_dir}/distillation.py"] + cli_args execute_subprocess_async(cmd, env=self.get_env()) contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} ckpt_files = [p for p in contents if p.endswith("ckpt")] assert len(ckpt_files) > 0 self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) # get the following from the module, (we don't have access to `model` here) metrics_save_path = os.path.join(output_dir, "metrics.json") val_metric = "rouge2" metrics = load_json(metrics_save_path) # {'test': [{'test_avg_loss': 10.63731575012207, 'test_avg_rouge1': 0.0, 'test_avg_rouge2': 0.0, 'test_avg_rougeL': 0.0, 'test_avg_gen_time': 0.1822289228439331, 'test_avg_gen_len': 142.0, 'step_count': 1}]} print(metrics) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertIsInstance(last_step_stats[f"val_avg_{val_metric}"], float) self.assertEqual(len(metrics["test"]), 1) desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) / 2 + 1) self.assertEqual(len(metrics["val"]), desired_n_evals)
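Because the multi-GPU case has to run in a fresh process, the test above flattens a `CHEAP_ARGS`-style dict into CLI flags with the small `convert` helper before handing them to `execute_subprocess_async`. A stand-alone sketch of that dict-to-argv step, on a made-up miniature dict (not the real `CHEAP_ARGS`), is shown below.

```python
# Sketch of the dict -> CLI-flag conversion used by the multi-GPU test above.
def convert(k, v):
    if v is False or v is None:
        return ""          # drop falsy/None options entirely
    if v is True:
        return f"--{k}"    # bare flag for store_true options
    return f"--{k}={v}"    # key=value otherwise

args_d = {"gpus": 2, "do_train": True, "fp16": False, "learning_rate": 3e-4, "teacher": None}
cli_args = [x for x in (convert(k, v) for k, v in args_d.items()) if len(x)]
print(cli_args)  # ['--gpus=2', '--do_train', '--learning_rate=0.0003']
```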
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/precomputed_pseudo_labels.md
### Saved Pseudo-Labels These are the generations of various large models on various large **training** sets. All in all they took about 200 GPU hours to produce. ### Available Pseudo-labels | Dataset | Model | Link | Rouge Scores | Notes |---------|-----------------------------|----------------------------------------------------------------------------------------|--------------------|------------------------------------------------------------------------------------------------------------- | XSUM | `facebook/bart-large-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/bart_xsum_pl.tgz) | 49.8/28.0/42.5 | | XSUM | `google/pegasus-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/pegasus_xsum.tgz) | 53.3/32.7/46.5 | | XSUM | `facebook/bart-large-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/xsum/xsum_pl2_bart.tgz) | | Bart pseudolabels filtered to those with Rouge2 > 10.0 against the ground truth. | CNN/DM | `sshleifer/pegasus-cnn-ft-v2` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/pegasus_cnn_cnn_pls.tgz) | 47.316/26.65/44.56 | do not worry about the fact that train.source is one line shorter. | CNN/DM | `facebook/bart-large-cnn` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/cnn_bart_pl.tgz) | | 5K (2%) are missing; there should be 282173. | CNN/DM | `google/pegasus-xsum` | [download](https://cdn-datasets.huggingface.co/pseudo/cnn_dm/pegasus_xsum_on_cnn.tgz) | 21.5/6.76/25 | extra labels for xsum distillation. Used max_source_length=512 (and all other pegasus-xsum configuration). | EN-RO | `Helsinki-NLP/opus-mt-en-ro` | [download](https://cdn-datasets.huggingface.co/pseudo/wmt_en_ro/opus_mt_en_ro.tgz) | | | EN-RO | `facebook/mbart-large-en-ro` | [download](https://cdn-datasets.huggingface.co/pseudo/wmt_en_ro/mbart_large_en_ro.tgz) | | (EN_RO = WMT 2016 English-Romanian). Example Download Command: ```bash curl -S https://cdn-datasets.huggingface.co/pseudo/xsum/bart_xsum_pl.tgz | tar -xvz -C . ``` ### Generating New Pseudolabels Here is the command I used to generate the pseudolabels in the second row of the table, after downloading XSUM from [here](https://cdn-datasets.huggingface.co/summarization/xsum.tar.gz). ```bash python -m torch.distributed.launch --nproc_per_node=8 run_distributed_eval.py \ --model_name google/pegasus-xsum \ --save_dir pegasus_xsum \ --data_dir xsum \ --bs 8 --sync_timeout 60000 \ --max_source_length 512 \ --type_path train ``` + These commands take a while to run. For example, `pegasus_cnn_cnn_pls.tgz` took 8 hours on 8 GPUs. + Pegasus does not work in fp16 :(, but Bart, mBART and Marian do. + Even if you have 1 GPU, `run_distributed_eval.py` is 10-20% faster than `run_eval.py` because it uses `SortishSampler` to minimize padding computation. ### Contributions Feel free to contribute your own pseudolabels via PR. Add a row to this table with a new Google Drive link (or other command-line-downloadable link).
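The tarballs above appear to unpack into the same `{split}.source` / `{split}.target` layout that `finetune.py` and `distillation.py` read via `--data_dir` (which is why the notes talk about `train.source` line counts). The small sanity-check sketch below is a hedged illustration of that assumption; `xsum_pl` is a placeholder for wherever you extracted an archive, and not every archive necessarily ships all splits.

```python
# Sketch: sanity-check a downloaded pseudo-label directory before pointing --data_dir at it.
# "xsum_pl" is a placeholder for your extraction path.
from pathlib import Path

data_dir = Path("xsum_pl")
for split in ["train", "val", "test"]:
    src, tgt = data_dir / f"{split}.source", data_dir / f"{split}.target"
    if src.exists() and tgt.exists():
        n_src = sum(1 for _ in src.open(encoding="utf-8"))
        n_tgt = sum(1 for _ in tgt.open(encoding="utf-8"))
        print(f"{split}: {n_src} source lines / {n_tgt} target lines")
    else:
        print(f"{split}: missing (this archive may only ship some splits)")
```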
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/_test_bash_script.py
#!/usr/bin/env python import argparse import os import sys from unittest.mock import patch import pytorch_lightning as pl import timeout_decorator import torch from distillation import SummarizationDistiller, distill_main from finetune import SummarizationModule, main from transformers import MarianMTModel from transformers.file_utils import cached_path from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow from utils import load_json MARIAN_MODEL = "sshleifer/mar_enro_6_3_student" class TestMbartCc25Enro(TestCasePlus): def setUp(self): super().setUp() data_cached = cached_path( "https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz", extract_compressed_file=True, ) self.data_dir = f"{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k" @slow @require_torch_gpu def test_model_download(self): """This warms up the cache so that we can time the next test without including download time, which varies between machines.""" MarianMTModel.from_pretrained(MARIAN_MODEL) # @timeout_decorator.timeout(1200) @slow @require_torch_gpu def test_train_mbart_cc25_enro_script(self): env_vars_to_replace = { "$MAX_LEN": 64, "$BS": 64, "$GAS": 1, "$ENRO_DIR": self.data_dir, "facebook/mbart-large-cc25": MARIAN_MODEL, # "val_check_interval=0.25": "val_check_interval=1.0", "--learning_rate=3e-5": "--learning_rate 3e-4", "--num_train_epochs 6": "--num_train_epochs 1", } # Clean up bash script bash_script = (self.test_file_dir / "train_mbart_cc25_enro.sh").open().read().split("finetune.py")[1].strip() bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "") for k, v in env_vars_to_replace.items(): bash_script = bash_script.replace(k, str(v)) output_dir = self.get_auto_remove_tmp_dir() # bash_script = bash_script.replace("--fp16 ", "") args = f""" --output_dir {output_dir} --tokenizer_name Helsinki-NLP/opus-mt-en-ro --sortish_sampler --do_predict --gpus 1 --freeze_encoder --n_train 40000 --n_val 500 --n_test 500 --fp16_opt_level O1 --num_sanity_val_steps 0 --eval_beams 2 """.split() # XXX: args.gpus > 1 : handle multi_gpu in the future testargs = ["finetune.py"] + bash_script.split() + args with patch.object(sys, "argv", testargs): parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() model = main(args) # Check metrics metrics = load_json(model.metrics_save_path) first_step_stats = metrics["val"][0] last_step_stats = metrics["val"][-1] self.assertEqual(len(metrics["val"]), (args.max_epochs / args.val_check_interval)) assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float) self.assertGreater(last_step_stats["val_avg_gen_time"], 0.01) # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?) self.assertLessEqual(last_step_stats["val_avg_gen_time"], 1.0) # test learning requirements: # 1. BLEU improves over the course of training by more than 2 pts self.assertGreater(last_step_stats["val_avg_bleu"] - first_step_stats["val_avg_bleu"], 2) # 2. BLEU finishes above 17 self.assertGreater(last_step_stats["val_avg_bleu"], 17) # 3. test BLEU and val BLEU within ~1.1 pt. 
self.assertLess(abs(metrics["val"][-1]["val_avg_bleu"] - metrics["test"][-1]["test_avg_bleu"]), 1.1) # check lightning ckpt can be loaded and has a reasonable statedict contents = os.listdir(output_dir) ckpt_path = [x for x in contents if x.endswith(".ckpt")][0] full_path = os.path.join(args.output_dir, ckpt_path) ckpt = torch.load(full_path, map_location="cpu") expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32 # TODO: turn on args.do_predict when PL bug fixed. if args.do_predict: contents = {os.path.basename(p) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"]) == 1 class TestDistilMarianNoTeacher(TestCasePlus): @timeout_decorator.timeout(600) @slow @require_torch_gpu def test_opus_mt_distill_script(self): data_dir = f"{self.test_file_dir_str}/test_data/wmt_en_ro" env_vars_to_replace = { "--fp16_opt_level=O1": "", "$MAX_LEN": 128, "$BS": 16, "$GAS": 1, "$ENRO_DIR": data_dir, "$m": "sshleifer/student_marian_en_ro_6_1", "val_check_interval=0.25": "val_check_interval=1.0", } # Clean up bash script bash_script = ( (self.test_file_dir / "distil_marian_no_teacher.sh").open().read().split("distillation.py")[1].strip() ) bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "") bash_script = bash_script.replace("--fp16 ", " ") for k, v in env_vars_to_replace.items(): bash_script = bash_script.replace(k, str(v)) output_dir = self.get_auto_remove_tmp_dir() bash_script = bash_script.replace("--fp16", "") epochs = 6 testargs = ( ["distillation.py"] + bash_script.split() + [ f"--output_dir={output_dir}", "--gpus=1", "--learning_rate=1e-3", f"--num_train_epochs={epochs}", "--warmup_steps=10", "--val_check_interval=1.0", "--do_predict", ] ) with patch.object(sys, "argv", testargs): parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() # assert args.gpus == gpus THIS BREAKS for multi_gpu model = distill_main(args) # Check metrics metrics = load_json(model.metrics_save_path) first_step_stats = metrics["val"][0] last_step_stats = metrics["val"][-1] assert len(metrics["val"]) >= (args.max_epochs / args.val_check_interval) # +1 accounts for val_sanity_check assert last_step_stats["val_avg_gen_time"] >= 0.01 assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"] # model learned nothing assert 1.0 >= last_step_stats["val_avg_gen_time"] # model hanging on generate. Maybe bad config was saved. assert isinstance(last_step_stats[f"val_avg_{model.val_metric}"], float) # check lightning ckpt can be loaded and has a reasonable statedict contents = os.listdir(output_dir) ckpt_path = [x for x in contents if x.endswith(".ckpt")][0] full_path = os.path.join(args.output_dir, ckpt_path) ckpt = torch.load(full_path, map_location="cpu") expected_key = "model.model.decoder.layers.0.encoder_attn_layer_norm.weight" assert expected_key in ckpt["state_dict"] assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32 # TODO: turn on args.do_predict when PL bug fixed. 
if args.do_predict: contents = {os.path.basename(p) for p in contents} assert "test_generations.txt" in contents assert "test_results.txt" in contents # assert len(metrics["val"]) == desired_n_evals assert len(metrics["test"]) == 1
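Both tests above reuse the checked-in shell scripts by keeping only what follows the `.py` entry point, stripping line continuations and `"$@"`, and substituting the `$VAR` placeholders before splitting into argv. A stripped-down sketch of that transformation, run on an inline snippet rather than a real `.sh` file, is:

```python
# Sketch of the bash-script-to-argv cleanup used by the tests above,
# applied to an inline snippet instead of a file on disk.
raw = 'python distillation.py \\\n --teacher $m \\\n --train_batch_size=$BS \\\n "$@"'

bash_script = raw.split("distillation.py")[1].strip()
bash_script = bash_script.replace("\\\n", "").strip().replace('"$@"', "")

for k, v in {"$m": "sshleifer/student_marian_en_ro_6_1", "$BS": 16}.items():
    bash_script = bash_script.replace(k, str(v))

testargs = ["distillation.py"] + bash_script.split()
print(testargs)
# ['distillation.py', '--teacher', 'sshleifer/student_marian_en_ro_6_1', '--train_batch_size=16']
```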
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py
import argparse import logging import os import sys import tempfile from pathlib import Path import lightning_base import pytest import pytorch_lightning as pl import torch from convert_pl_checkpoint_to_hf import convert_pl_to_hf from distillation import distill_main from finetune import SummarizationModule, main from huggingface_hub import list_models from parameterized import parameterized from run_eval import generate_summaries_or_translations from torch import nn from transformers import AutoConfig, AutoModelForSeq2SeqLM from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow from utils import label_smoothed_nll_loss, lmap, load_json logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() CUDA_AVAILABLE = torch.cuda.is_available() CHEAP_ARGS = { "max_tokens_per_batch": None, "supervise_forward": True, "normalize_hidden": True, "label_smoothing": 0.2, "eval_max_gen_length": None, "eval_beams": 1, "val_metric": "loss", "save_top_k": 1, "adafactor": True, "early_stopping_patience": 2, "logger_name": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "accumulate_grad_batches": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "lr_scheduler": "linear", "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "max_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "freeze_encoder": False, "auto_scale_batch_size": False, "overwrite_output_dir": False, "student": None, } def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" T5_TINIER = "sshleifer/t5-tinier-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" FSMT_TINY = "stas/tiny-wmt19-en-de" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistiller(TestCasePlus): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks return cls @slow @require_torch_gpu def test_hub_configs(self): """I put require_torch_gpu cause I only want this to 
run with self-scheduled.""" model_list = list_models() org = "sshleifer" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"] failures = [] for m in model_ids: if m in allowed_to_be_broken: continue try: AutoConfig.from_pretrained(m) except Exception: failures.append(m) assert not failures, f"The following models could not be loaded through AutoConfig: {failures}" def test_distill_no_teacher(self): updates = {"student_encoder_layers": 2, "student_decoder_layers": 1, "no_teacher": True} self._test_distiller_cli(updates) def test_distill_checkpointing_with_teacher(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "max_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", } model = self._test_distiller_cli(updates, check_contents=False) ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) self.assertEqual(len(transformer_ckpts), 2) examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines()) out_path = tempfile.mktemp() # XXX: not being cleaned up generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) self.assertTrue(Path(out_path).exists()) out_path_new = self.get_auto_remove_tmp_dir() convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new) assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin")) def test_loss_fn(self): model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY) input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"] target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device) decoder_input_ids = target_ids[:, :-1].contiguous() # Why this line? lm_labels = target_ids[:, 1:].clone() # why clone? 
model_computed_loss = model( input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False ).loss logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits lprobs = nn.functional.log_softmax(logits, dim=-1) smoothed_loss, nll_loss = label_smoothed_nll_loss( lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id ) with self.assertRaises(AssertionError): # TODO: understand why this breaks self.assertEqual(nll_loss, model_computed_loss) def test_distill_mbart(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "num_train_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "task": "translation", "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", "tokenizer_name": MBART_TINY, "teacher": MBART_TINY, "src_lang": "en_XX", "tgt_lang": "ro_RO", } model = self._test_distiller_cli(updates, check_contents=False) assert model.model.config.model_type == "mbart" ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) all_files = list(Path(model.output_dir).glob("best_tfmr/*")) assert len(all_files) > 2 self.assertEqual(len(transformer_ckpts), 2) def test_distill_t5(self): updates = { "student_encoder_layers": 1, "student_decoder_layers": 1, "alpha_hid": 2.0, "teacher": T5_TINY, "model_name_or_path": T5_TINY, "tokenizer_name": T5_TINY, } self._test_distiller_cli(updates) def test_distill_different_base_models(self): updates = { "teacher": T5_TINY, "student": T5_TINIER, "model_name_or_path": T5_TINIER, "tokenizer_name": T5_TINIER, } self._test_distiller_cli(updates) def _test_distiller_cli(self, updates, check_contents=True): default_updates = { "label_smoothing": 0.0, "early_stopping_patience": -1, "train_batch_size": 1, "eval_batch_size": 2, "max_epochs": 2, "alpha_mlm": 0.2, "alpha_ce": 0.8, "do_predict": True, "model_name_or_path": "sshleifer/tinier_bart", "teacher": CHEAP_ARGS["model_name_or_path"], "val_check_interval": 0.5, } default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) model = distill_main(argparse.Namespace(**args_d)) if not check_contents: return model contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} ckpt_files = [p for p in contents if p.endswith("ckpt")] assert len(ckpt_files) > 0 self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) metrics = load_json(model.metrics_save_path) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1) self.assertEqual(len(metrics["val"]), desired_n_evals) self.assertEqual(len(metrics["test"]), 1) return model class TestTheRest(TestCasePlus): @parameterized.expand( [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY], ) def test_finetune(self, model): args_d: dict = CHEAP_ARGS.copy() task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization" args_d["label_smoothing"] = 0.1 if task == "translation" else 0 tmp_dir = 
make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, output_dir=output_dir, do_predict=True, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) assert "n_train" in args_d args = argparse.Namespace(**args_d) module = main(args) input_embeds = module.model.get_input_embeddings() assert not input_embeds.weight.requires_grad if model == T5_TINY: lm_head = module.model.lm_head assert not lm_head.weight.requires_grad assert (lm_head.weight == input_embeds.weight).all().item() elif model == FSMT_TINY: fsmt = module.model.model embed_pos = fsmt.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not fsmt.decoder.embed_tokens.weight.requires_grad # check that embeds are not the same assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens else: bart = module.model.model embed_pos = bart.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not bart.shared.weight.requires_grad # check that embeds are the same assert bart.decoder.embed_tokens == bart.encoder.embed_tokens assert bart.decoder.embed_tokens == bart.shared example_batch = load_json(module.output_dir / "text_batch.json") assert isinstance(example_batch, dict) assert len(example_batch) >= 4 def test_finetune_extra_model_args(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) args_d.update( data_dir=tmp_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # test models whose config includes the extra_model_args model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d1 = args_d.copy() args_d1.update( model_name_or_path=model, output_dir=output_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: args_d1[p] = 0.5 args = argparse.Namespace(**args_d1) model = main(args) for p in extra_model_params: assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}" # test models whose config doesn't include the extra_model_args model = T5_TINY output_dir = self.get_auto_remove_tmp_dir() args_d2 = args_d.copy() args_d2.update( model_name_or_path=model, output_dir=output_dir, ) unsupported_param = "encoder_layerdrop" args_d2[unsupported_param] = 0.5 args = argparse.Namespace(**args_d2) with pytest.raises(Exception) as excinfo: model = main(args) assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute" def test_finetune_lr_schedulers(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, output_dir=output_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # emulate finetune.py parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = {"--help": True} # --help test with pytest.raises(SystemExit) as excinfo: with CaptureStdout() as cs: args 
= parser.parse_args(args) assert False, "--help is expected to sys.exit" assert excinfo.type == SystemExit expected = lightning_base.arg_to_scheduler_metavar assert expected in cs.out, "--help is expected to list the supported schedulers" # --lr_scheduler=non_existing_scheduler test unsupported_param = "non_existing_scheduler" args = {f"--lr_scheduler={unsupported_param}"} with pytest.raises(SystemExit) as excinfo: with CaptureStderr() as cs: args = parser.parse_args(args) assert False, "invalid argument is expected to sys.exit" assert excinfo.type == SystemExit expected = f"invalid choice: '{unsupported_param}'" assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}" # --lr_scheduler=existing_scheduler test supported_param = "cosine" args_d1 = args_d.copy() args_d1["lr_scheduler"] = supported_param args = argparse.Namespace(**args_d1) model = main(args) assert ( getattr(model.hparams, "lr_scheduler") == supported_param ), f"lr_scheduler={supported_param} shouldn't fail"
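`test_loss_fn` above checks `label_smoothed_nll_loss` against the model's own cross-entropy loss. For a more self-contained picture, here is a hedged sketch on toy logits; it assumes you run it from this directory so the local `utils` module is importable, and that the function keeps the `(lprobs, target, epsilon, ignore_index)` signature used throughout these files.

```python
# Sketch: label-smoothed loss on toy logits, mirroring the call in finetune.py's _step.
import torch
from torch import nn

from utils import label_smoothed_nll_loss  # local module in this example directory

batch_size, seq_len, vocab_size, pad_token_id = 2, 4, 11, 1
logits = torch.randn(batch_size, seq_len, vocab_size)
target = torch.randint(2, vocab_size, (batch_size, seq_len))
target[0, -1] = pad_token_id  # padded positions are ignored via ignore_index

lprobs = nn.functional.log_softmax(logits, dim=-1)
smoothed_loss, nll_loss = label_smoothed_nll_loss(lprobs, target, 0.1, ignore_index=pad_token_id)
print(smoothed_loss.item(), nll_loss.item())
```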
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/run_eval.py
#!/usr/bin/env python import argparse import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List import torch from tqdm import tqdm from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params logger = getLogger(__name__) DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" def generate_summaries_or_translations( examples: List[str], out_file: str, model_name: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, task="summarization", prefix=None, **generate_kwargs, ) -> Dict: """Save model.generate results to <out_file>, and return how long it took.""" fout = Path(out_file).open("w", encoding="utf-8") model_name = str(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if fp16: model = model.half() tokenizer = AutoTokenizer.from_pretrained(model_name) logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. start_time = time.time() # update config with task specific params use_task_specific_params(model, task) if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" for examples_chunk in tqdm(list(chunks(examples, batch_size))): examples_chunk = [prefix + text for text in examples_chunk] batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device) summaries = model.generate( input_ids=batch.input_ids, attention_mask=batch.attention_mask, **generate_kwargs, ) dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) for hypothesis in dec: fout.write(hypothesis + "\n") fout.flush() fout.close() runtime = int(time.time() - start_time) # seconds n_obs = len(examples) return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)} def datetime_now(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") def run_generate(verbose=True): """ Takes input text, generates output, and then using reference calculates the BLEU scores. The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed. Args: verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout Returns: a tuple: ``(scores, params}`` - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}`` - ``params``: a dict of custom params, e.g. 
``{'num_beams': 5, 'length_penalty': 0.8}`` """ parser = argparse.ArgumentParser() parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.") parser.add_argument("input_path", type=str, help="like cnn_dm/test.source") parser.add_argument("save_path", type=str, help="where to save summaries") parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target") parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics") parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all." ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results") parser.add_argument( "--info", nargs="?", type=str, const=datetime_now(), help=( "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g." " lang=en-ru. If no value is passed, the current datetime string will be used." ), ) # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate args, rest = parser.parse_known_args() parsed_args = parse_numeric_n_bool_cl_kwargs(rest) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}") with open(args.input_path) as f: examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in f.readlines()] if args.n_obs > 0: examples = examples[: args.n_obs] Path(args.save_path).parent.mkdir(exist_ok=True) if args.reference_path is None and Path(args.score_path).exists(): warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.") runtime_metrics = generate_summaries_or_translations( examples, args.save_path, args.model_name, batch_size=args.bs, device=args.device, fp16=args.fp16, task=args.task, prefix=args.prefix, **parsed_args, ) if args.reference_path is None: return {} # Compute scores score_fn = calculate_bleu if "translation" in args.task else calculate_rouge output_lns = [x.rstrip() for x in open(args.save_path).readlines()] reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)] scores: dict = score_fn(output_lns, reference_lns) scores.update(runtime_metrics) if args.dump_args: scores.update(parsed_args) if args.info: scores["info"] = args.info if verbose: print(scores) if args.score_path is not None: json.dump(scores, open(args.score_path, "w")) return scores if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@ run_generate(verbose=True)
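Besides the `argparse` entry point, `generate_summaries_or_translations` above is importable and callable directly. The hedged sketch below does that with a tiny checkpoint and placeholder paths; any extra keyword such as `num_beams` is forwarded to `model.generate` through `**generate_kwargs`.

```python
# Sketch: call run_eval's generation helper directly instead of via the CLI.
# Model name and output path are placeholders; run from this directory so run_eval is importable.
from run_eval import generate_summaries_or_translations

examples = [
    "PG&E stated it scheduled the blackouts in response to forecasts for high winds.",
    "The authorities announced a new schedule for the city's public transport.",
]
stats = generate_summaries_or_translations(
    examples,
    out_file="hypotheses.txt",                 # one generated line per input example
    model_name="sshleifer/bart-tiny-random",   # tiny checkpoint, just to keep the sketch cheap
    batch_size=2,
    num_beams=2,                               # forwarded to model.generate via **generate_kwargs
)
print(stats)  # {'n_obs': 2, 'runtime': ..., 'seconds_per_sample': ...}
```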
0
mavonic_private_repos/transformers/examples/research_projects
mavonic_private_repos/transformers/examples/research_projects/seq2seq-distillation/sentence_splitter.py
import re from filelock import FileLock try: import nltk NLTK_AVAILABLE = True except (ImportError, ModuleNotFoundError): NLTK_AVAILABLE = False if NLTK_AVAILABLE: with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) def add_newline_to_end_of_each_sentence(x: str) -> str: """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS.""" x = re.sub("<n>", "", x) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(x))
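As a quick usage note for the helper above (it exists so that rougeLsum sees one sentence per line; the module downloads the `punkt` tokenizer on import when `nltk` is installed), a minimal sketch is:

```python
# Sketch: prepare a summary for rougeLsum scoring (requires `nltk`;
# run from this directory so sentence_splitter is importable).
from sentence_splitter import add_newline_to_end_of_each_sentence

summary = "The model was distilled from a larger teacher. It keeps most of the teacher's ROUGE."
print(add_newline_to_end_of_each_sentence(summary))
# The model was distilled from a larger teacher.
# It keeps most of the teacher's ROUGE.
```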
0