| """ Finetuning the library models for sequence classification on GLUE.""" |
| |
|
|
| import logging |
| import os |
| import random |
| import sys |
| from dataclasses import dataclass, field |
| from typing import Optional |
|
|
| import numpy as np |
| from datasets import load_dataset, load_metric |
|
|
| import transformers |
| from transformers import ( |
| AutoConfig, |
| AutoModelForSequenceClassification, |
| AutoTokenizer, |
| DataCollatorWithPadding, |
| EvalPrediction, |
| HfArgumentParser, |
| PretrainedConfig, |
| Trainer, |
| TrainingArguments, |
| default_data_collator, |
| set_seed, |
| ) |
| from transformers.trainer_utils import get_last_checkpoint, is_main_process |
| from transformers.utils import check_min_version |
|
|
|
|
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
| check_min_version("4.5.0") |
|
|
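# Column names of the two text inputs for each GLUE task; a second key of None
# means the task uses a single sentence.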
| task_to_keys = { |
| "cola": ("sentence", None), |
| "mnli": ("premise", "hypothesis"), |
| "mrpc": ("sentence1", "sentence2"), |
| "qnli": ("question", "sentence"), |
| "qqp": ("question1", "question2"), |
| "rte": ("sentence1", "sentence2"), |
| "sst2": ("sentence", None), |
| "stsb": ("sentence1", "sentence2"), |
| "wnli": ("sentence1", "sentence2"), |
| } |
|
|
| logger = logging.getLogger(__name__) |
|
|
|
|
| @dataclass |
| class DataTrainingArguments: |
| """ |
| Arguments pertaining to what data we are going to input our model for training and eval. |
| |
| Using `HfArgumentParser` we can turn this class |
| into argparse arguments to be able to specify them on |
| the command line. |
| """ |
|
|
| task_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())}, |
| ) |
| max_seq_length: int = field( |
| default=128, |
| metadata={ |
| "help": "The maximum total input sequence length after tokenization. Sequences longer " |
| "than this will be truncated, sequences shorter will be padded." |
| }, |
| ) |
| overwrite_cache: bool = field( |
| default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} |
| ) |
| pad_to_max_length: bool = field( |
| default=True, |
| metadata={ |
| "help": "Whether to pad all samples to `max_seq_length`. " |
| "If False, will pad the samples dynamically when batching to the maximum length in the batch." |
| }, |
| ) |
| max_train_samples: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": "For debugging purposes or quicker training, truncate the number of training examples to this " |
| "value if set." |
| }, |
| ) |
| max_val_samples: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " |
| "value if set." |
| }, |
| ) |
| max_test_samples: Optional[int] = field( |
| default=None, |
| metadata={ |
| "help": "For debugging purposes or quicker training, truncate the number of test examples to this " |
| "value if set." |
| }, |
| ) |
| train_file: Optional[str] = field( |
| default=None, metadata={"help": "A csv or a json file containing the training data."} |
| ) |
| validation_file: Optional[str] = field( |
| default=None, metadata={"help": "A csv or a json file containing the validation data."} |
| ) |
| test_file: Optional[str] = field( |
| default=None, metadata={"help": "A csv or a json file containing the test data."} |
| ) |
|
|
| def __post_init__(self): |
| if self.task_name is not None: |
| self.task_name = self.task_name.lower() |
| if self.task_name not in task_to_keys.keys(): |
| raise ValueError( |
| "Unknown task, you should pick one in " + ",".join(task_to_keys.keys()) |
| ) |
| elif self.train_file is None or self.validation_file is None: |
| raise ValueError("Need either a GLUE task or a training/validation file.") |
| else: |
| train_extension = self.train_file.split(".")[-1] |
| assert train_extension in [ |
| "csv", |
| "json", |
| ], "`train_file` should be a csv or a json file." |
| validation_extension = self.validation_file.split(".")[-1] |
| assert ( |
| validation_extension == train_extension |
| ), "`validation_file` should have the same extension (csv or json) as `train_file`." |
|
|
|
|
| @dataclass |
| class ModelArguments: |
| """ |
| Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. |
| """ |
|
|
| model_name_or_path: str = field( |
| metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} |
| ) |
| config_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "Pretrained config name or path if not the same as model_name"}, |
| ) |
| tokenizer_name: Optional[str] = field( |
| default=None, |
| metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}, |
| ) |
| cache_dir: Optional[str] = field( |
| default=None, |
| metadata={ |
| "help": "Where do you want to store the pretrained models downloaded from huggingface.co" |
| }, |
| ) |
| use_fast_tokenizer: bool = field( |
| default=True, |
| metadata={ |
| "help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not." |
| }, |
| ) |
| model_revision: str = field( |
| default="main", |
| metadata={ |
| "help": "The specific model version to use (can be a branch name, tag name or commit id)." |
| }, |
| ) |
| use_auth_token: bool = field( |
| default=False, |
| metadata={ |
| "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " |
| "with private models)." |
| }, |
| ) |
|
|
|
|
| def main(): |
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
|
|
| parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) |
| if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): |
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
| model_args, data_args, training_args = parser.parse_json_file( |
| json_file=os.path.abspath(sys.argv[1]) |
| ) |
| else: |
| model_args, data_args, training_args = parser.parse_args_into_dataclasses() |
|
|
    # Detecting last checkpoint.
| last_checkpoint = None |
| if ( |
| os.path.isdir(training_args.output_dir) |
| and training_args.do_train |
| and not training_args.overwrite_output_dir |
| ): |
| last_checkpoint = get_last_checkpoint(training_args.output_dir) |
| if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: |
| raise ValueError( |
| f"Output directory ({training_args.output_dir}) already exists and is not empty. " |
| "Use --overwrite_output_dir to overcome." |
| ) |
| elif last_checkpoint is not None: |
| logger.info( |
| f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " |
| "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." |
| ) |
|
|
    # Setup logging
| logging.basicConfig( |
| format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", |
| datefmt="%m/%d/%Y %H:%M:%S", |
| handlers=[logging.StreamHandler(sys.stdout)], |
| ) |
| logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) |
|
|
    # Log a short summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity of the Transformers logger to INFO (on the main process only):
| if is_main_process(training_args.local_rank): |
| transformers.utils.logging.set_verbosity_info() |
| transformers.utils.logging.enable_default_handler() |
| transformers.utils.logging.enable_explicit_format() |
| logger.info(f"Training/evaluation parameters {training_args}") |
|
|
    # Set seed before initializing model.
| set_seed(training_args.seed) |
|
|
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
    # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
    # sentences in columns called 'sentence1' and 'sentence2' if such columns exist, or the first two columns not
    # named 'label' if at least two columns are provided.
    #
    # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
    # single column. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
| if data_args.task_name is not None: |
        # Downloading and loading a dataset from the hub.
| datasets = load_dataset("glue", data_args.task_name) |
| else: |
        # Loading a dataset from your local files.
        # CSV/JSON training and evaluation files are needed.
| data_files = {"train": data_args.train_file, "validation": data_args.validation_file} |
|
|
        # Get the test dataset: you can provide your own CSV/JSON test file (see below)
        # when you use `do_predict` without specifying a GLUE benchmark task.
| if training_args.do_predict: |
| if data_args.test_file is not None: |
| train_extension = data_args.train_file.split(".")[-1] |
| test_extension = data_args.test_file.split(".")[-1] |
| assert ( |
| test_extension == train_extension |
| ), "`test_file` should have the same extension (csv or json) as `train_file`." |
| data_files["test"] = data_args.test_file |
| else: |
| raise ValueError("Need either a GLUE task or a test file for `do_predict`.") |
|
|
| for key in data_files.keys(): |
| logger.info(f"load a local file for {key}: {data_files[key]}") |
|
|
| if data_args.train_file.endswith(".csv"): |
            # Loading a dataset from local csv files
| datasets = load_dataset("csv", data_files=data_files) |
| else: |
            # Loading a dataset from local json files
| datasets = load_dataset("json", data_files=data_files) |
    # See more about loading any type of standard or custom dataset at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
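    # For reference, a minimal custom training CSV for a sentence-pair task could look like the sketch
    # below (illustrative values; the script looks for 'sentence1'/'sentence2' columns, or falls back to
    # the first two non-label columns, plus a 'label' column):
    #
    #   sentence1,sentence2,label
    #   "A man is playing a guitar.","Someone plays an instrument.",1
    #   "A dog runs in the park.","The cat is sleeping.",0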
|
|
    # Labels
| if data_args.task_name is not None: |
| is_regression = data_args.task_name == "stsb" |
| if not is_regression: |
| label_list = datasets["train"].features["label"].names |
| num_labels = len(label_list) |
| else: |
| num_labels = 1 |
| else: |
        # Trying to have good defaults here, don't hesitate to tweak to your needs.
| is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"] |
| if is_regression: |
| num_labels = 1 |
| else: |
            # We don't have a GLUE label list, so build one from the unique values in the dataset. A useful
            # fast method: https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique
| label_list = datasets["train"].unique("label") |
| label_list.sort() |
| num_labels = len(label_list) |
|
|
    # Load pretrained model and tokenizer.
    #
    # In distributed training, the .from_pretrained methods guarantee that only one local process can
    # concurrently download model & vocab.
| config = AutoConfig.from_pretrained( |
| model_args.config_name if model_args.config_name else model_args.model_name_or_path, |
| num_labels=num_labels, |
| finetuning_task=data_args.task_name, |
| cache_dir=model_args.cache_dir, |
| revision=model_args.model_revision, |
| use_auth_token=True if model_args.use_auth_token else None, |
| ) |
| tokenizer = AutoTokenizer.from_pretrained( |
| model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, |
| cache_dir=model_args.cache_dir, |
| use_fast=model_args.use_fast_tokenizer, |
| revision=model_args.model_revision, |
| use_auth_token=True if model_args.use_auth_token else None, |
| ) |
| model = AutoModelForSequenceClassification.from_pretrained( |
| model_args.model_name_or_path, |
| from_tf=bool(".ckpt" in model_args.model_name_or_path), |
| config=config, |
| cache_dir=model_args.cache_dir, |
| revision=model_args.model_revision, |
| use_auth_token=True if model_args.use_auth_token else None, |
| ) |
|
|
    # Preprocessing the datasets
| if data_args.task_name is not None: |
| sentence1_key, sentence2_key = task_to_keys[data_args.task_name] |
| else: |
        # Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
| non_label_column_names = [ |
| name for name in datasets["train"].column_names if name != "label" |
| ] |
| if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: |
| sentence1_key, sentence2_key = "sentence1", "sentence2" |
| else: |
| if len(non_label_column_names) >= 2: |
| sentence1_key, sentence2_key = non_label_column_names[:2] |
| else: |
| sentence1_key, sentence2_key = non_label_column_names[0], None |
|
|
    # Padding strategy
| if data_args.pad_to_max_length: |
| padding = "max_length" |
| else: |
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch.
| padding = False |
|
|
    # Some models have set the order of the labels to use, so let's make sure we do use it.
| label_to_id = None |
| if ( |
| model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id |
| and data_args.task_name is not None |
| and not is_regression |
| ): |
        # Some models have all-caps label names in their config, some don't.
        label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
        if sorted(label_name_to_id.keys()) == sorted(label_list):
            label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
        else:
            logger.warning(
                "Your model seems to have been trained with labels, but they don't match the dataset: "
                f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
                "\nIgnoring the model labels as a result."
            )
| elif data_args.task_name is None and not is_regression: |
| label_to_id = {v: i for i, v in enumerate(label_list)} |
|
|
| if data_args.max_seq_length > tokenizer.model_max_length: |
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
| max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) |
|
|
| def preprocess_function(examples): |
        # Tokenize the texts
| args = ( |
| (examples[sentence1_key],) |
| if sentence2_key is None |
| else (examples[sentence1_key], examples[sentence2_key]) |
| ) |
| result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True) |
|
|
        # Map labels to IDs (not necessary for GLUE tasks)
| if label_to_id is not None and "label" in examples: |
| result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]] |
| return result |
|
|
| datasets = datasets.map( |
| preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache |
| ) |
| if training_args.do_train: |
| if "train" not in datasets: |
| raise ValueError("--do_train requires a train dataset") |
| train_dataset = datasets["train"] |
| if data_args.max_train_samples is not None: |
| train_dataset = train_dataset.select(range(data_args.max_train_samples)) |
|
|
| if training_args.do_eval: |
| if "validation" not in datasets and "validation_matched" not in datasets: |
| raise ValueError("--do_eval requires a validation dataset") |
| eval_dataset = datasets[ |
| "validation_matched" if data_args.task_name == "mnli" else "validation" |
| ] |
| if data_args.max_val_samples is not None: |
| eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) |
|
|
| if ( |
| training_args.do_predict |
| or data_args.task_name is not None |
| or data_args.test_file is not None |
| ): |
| if "test" not in datasets and "test_matched" not in datasets: |
| raise ValueError("--do_predict requires a test dataset") |
| test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"] |
| if data_args.max_test_samples is not None: |
| test_dataset = test_dataset.select(range(data_args.max_test_samples)) |
|
|
    # Log a few random samples from the training set:
| if training_args.do_train: |
| for index in random.sample(range(len(train_dataset)), 3): |
| logger.info(f"Sample {index} of the training set: {train_dataset[index]}.") |
|
|
    # Get the metric function
| if data_args.task_name is not None: |
| metric = load_metric("glue", data_args.task_name) |
    # TODO: When datasets metrics include regular accuracy, make an else here and remove the special branch
    # from compute_metrics.
|
|
    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple
    # with a predictions and a label_ids field) and has to return a dictionary mapping strings to floats.
| def compute_metrics(p: EvalPrediction): |
| preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions |
| preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1) |
| if data_args.task_name is not None: |
| result = metric.compute(predictions=preds, references=p.label_ids) |
| if len(result) > 1: |
| result["combined_score"] = np.mean(list(result.values())).item() |
| return result |
| elif is_regression: |
| return {"mse": ((preds - p.label_ids) ** 2).mean().item()} |
| else: |
| return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()} |
|
|
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    # With fp16, padding to a multiple of 8 makes better use of tensor cores.
| if data_args.pad_to_max_length: |
| data_collator = default_data_collator |
| elif training_args.fp16: |
| data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) |
| else: |
| data_collator = None |
|
|
    # Initialize our Trainer
| trainer = Trainer( |
| model=model, |
| args=training_args, |
| train_dataset=train_dataset if training_args.do_train else None, |
| eval_dataset=eval_dataset if training_args.do_eval else None, |
| compute_metrics=compute_metrics, |
| tokenizer=tokenizer, |
| data_collator=data_collator, |
| ) |
|
|
    # Training
| if training_args.do_train: |
| checkpoint = None |
| if last_checkpoint is not None: |
| checkpoint = last_checkpoint |
| elif os.path.isdir(model_args.model_name_or_path): |
            # Check that the config from this potential checkpoint has the right number of labels before
            # using it as a checkpoint.
| if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels: |
| checkpoint = model_args.model_name_or_path |
|
|
| train_result = trainer.train(resume_from_checkpoint=checkpoint) |
| metrics = train_result.metrics |
| max_train_samples = ( |
| data_args.max_train_samples |
| if data_args.max_train_samples is not None |
| else len(train_dataset) |
| ) |
| metrics["train_samples"] = min(max_train_samples, len(train_dataset)) |
|
|
| trainer.save_model() |
|
|
| trainer.log_metrics("train", metrics) |
| trainer.save_metrics("train", metrics) |
| trainer.save_state() |
|
|
    # Evaluation
| if training_args.do_eval: |
| logger.info("*** Evaluate ***") |
|
|
        # Loop to handle MNLI double evaluation (matched, mismatched)
| tasks = [data_args.task_name] |
| eval_datasets = [eval_dataset] |
| if data_args.task_name == "mnli": |
| tasks.append("mnli-mm") |
| eval_datasets.append(datasets["validation_mismatched"]) |
|
|
| for eval_dataset, task in zip(eval_datasets, tasks): |
| metrics = trainer.evaluate(eval_dataset=eval_dataset) |
|
|
| max_val_samples = ( |
| data_args.max_val_samples |
| if data_args.max_val_samples is not None |
| else len(eval_dataset) |
| ) |
| metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) |
|
|
| trainer.log_metrics("eval", metrics) |
| trainer.save_metrics("eval", metrics) |
|
|
| if training_args.do_predict: |
| logger.info("*** Test ***") |
|
|
        # Loop to handle MNLI double prediction (matched, mismatched)
| tasks = [data_args.task_name] |
| test_datasets = [test_dataset] |
| if data_args.task_name == "mnli": |
| tasks.append("mnli-mm") |
| test_datasets.append(datasets["test_mismatched"]) |
|
|
| for test_dataset, task in zip(test_datasets, tasks): |
            # Remove the `label` column because it contains -1 and the Trainer won't like that.
| test_dataset.remove_columns_("label") |
| predictions = trainer.predict(test_dataset=test_dataset).predictions |
| predictions = ( |
| np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1) |
| ) |
|
|
| output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt") |
| if trainer.is_world_process_zero(): |
| with open(output_test_file, "w") as writer: |
| logger.info(f"***** Test results {task} *****") |
| writer.write("index\tprediction\n") |
| for index, item in enumerate(predictions): |
| if is_regression: |
| writer.write(f"{index}\t{item:3.3f}\n") |
| else: |
| item = label_list[item] |
| writer.write(f"{index}\t{item}\n") |
|
|
|
|
| def _mp_fn(index): |
    # For xla_spawn (TPUs)
| main() |
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|