"""Fine-tuning the library models for sequence classification."""
|
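# You can also adapt this script to your own text classification task; pointers for this are left as comments below.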
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

import numpy as np
from datasets import load_dataset

from transformers import (
    AutoConfig,
    AutoTokenizer,
    HfArgumentParser,
    PretrainedConfig,
    PushToHubCallback,
    TFAutoModelForSequenceClassification,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.utils import CONFIG_NAME, TF2_WEIGHTS_NAME, send_example_telemetry


os.environ["TF_CPP_MIN_LOG_LEVEL"] = "1"  # Reduce the amount of TensorFlow console output
import tensorflow as tf  # noqa: E402


logger = logging.getLogger(__name__)


class SavePretrainedCallback(tf.keras.callbacks.Callback):
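    # Hugging Face models have a save_pretrained() method that saves both the weights and the
    # metadata needed to reload them later with from_pretrained(). This simple Keras callback
    # saves the model that way at the end of every epoch.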
    def __init__(self, output_dir, **kwargs):
        super().__init__()
        self.output_dir = output_dir

    def on_epoch_end(self, epoch, logs=None):
        self.model.save_pretrained(self.output_dir)


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.

    Using `HfArgumentParser` we can turn this class into argparse arguments that can be
    specified on the command line.
    """

    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch. "
                "Data will always be padded when using TPUs."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_val_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        },
    )
    max_test_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of test examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        train_extension = self.train_file.split(".")[-1].lower() if self.train_file is not None else None
        validation_extension = (
            self.validation_file.split(".")[-1].lower() if self.validation_file is not None else None
        )
        test_extension = self.test_file.split(".")[-1].lower() if self.test_file is not None else None
        extensions = {train_extension, validation_extension, test_extension}
        extensions.discard(None)
        assert len(extensions) != 0, "Need to supply at least one of --train_file, --validation_file or --test_file!"
        assert len(extensions) == 1, "All input files should have the same file extension, either csv or json!"
        assert "csv" in extensions or "json" in extensions, "Input files should have either .csv or .json extensions!"
        self.input_file_extension = extensions.pop()


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )


def main():
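    # See all possible arguments in src/transformers/training_args.py, or by passing the
    # --help flag to this script. We keep distinct sets of args for a cleaner separation
    # of concerns.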
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
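        # If we pass only one argument to the script and it's the path to a json file,
        # parse it to get our arguments.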
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
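
    # Sending telemetry. Tracking example usage helps the maintainers better allocate resources
    # to maintain the examples. The information sent is the arguments passed to the script, along
    # with your Python/framework versions.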
    send_example_telemetry("run_text_classification", model_args, data_args, framework="tensorflow")

    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)
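
    # Detect a previous checkpoint: if the output directory already contains a saved config and
    # weights, resume training from them unless --overwrite_output_dir is passed.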
    checkpoint = None
    if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
        if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
            checkpoint = output_dir
            logger.info(
                f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
                " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
        else:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to continue regardless."
            )

    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO)

    logger.info(f"Training/evaluation parameters {training_args}")
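
    # For CSV/JSON files, this script uses the 'label' field as the label and the 'sentence1' and
    # optionally 'sentence2' fields as inputs if they exist. If not, the first non-label field is
    # used as sentence1, and the second (if it exists) as sentence2. If the files contain only one
    # non-label column, single-sentence classification is performed on that column.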
    data_files = {"train": data_args.train_file, "validation": data_args.validation_file, "test": data_args.test_file}
    data_files = {key: file for key, file in data_files.items() if file is not None}

    for key in data_files.keys():
        logger.info(f"Loading a local file for {key}: {data_files[key]}")

    if data_args.input_file_extension == "csv":
        datasets = load_dataset(
            "csv",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        datasets = load_dataset(
            "json",
            data_files=data_files,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
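
    # If a training set was passed, infer the label list (or detect a regression problem) from it.
    # Otherwise, label information is read from the saved model config later on.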
    if "train" in datasets:
        is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
        if is_regression:
            num_labels = 1
        else:
            label_list = datasets["train"].unique("label")
            label_list.sort()  # Sort for determinism
            num_labels = len(label_list)
    else:
        num_labels = None
        label_list = None
        is_regression = None
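
    # Load the model config and tokenizer. In distributed training, the .from_pretrained methods
    # guarantee that only one local process can concurrently download model & vocab.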
    if checkpoint is not None:
        config_path = training_args.output_dir
    elif model_args.config_name:
        config_path = model_args.config_name
    else:
        config_path = model_args.model_name_or_path
    if num_labels is not None:
        config = AutoConfig.from_pretrained(
            config_path,
            num_labels=num_labels,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        config = AutoConfig.from_pretrained(
            config_path,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
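
    # Dataset preprocessing: pick default input columns. 'sentence1'/'sentence2' are preferred if
    # present; otherwise the first (and, if present, second) non-label columns are used.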
    column_names = {col for cols in datasets.column_names.values() for col in cols}
    non_label_column_names = [name for name in column_names if name != "label"]
    if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
        sentence1_key, sentence2_key = "sentence1", "sentence2"
    elif "sentence1" in non_label_column_names:
        sentence1_key, sentence2_key = "sentence1", None
    else:
        if len(non_label_column_names) >= 2:
            sentence1_key, sentence2_key = non_label_column_names[:2]
        else:
            sentence1_key, sentence2_key = non_label_column_names[0], None

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
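
    # Ensure that our labels match the model's, if the model already has label mappings.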
    if "train" in datasets:
        if not is_regression and config.label2id != PretrainedConfig(num_labels=num_labels).label2id:
            label_name_to_id = config.label2id
            if sorted(label_name_to_id.keys()) == sorted(label_list):
                label_to_id = label_name_to_id
            else:
                logger.warning(
                    "Your model seems to have been trained with labels, but they don't match the dataset: "
                    f"model labels: {sorted(label_name_to_id.keys())}, dataset labels:"
                    f" {sorted(label_list)}.\nIgnoring the model labels as a result."
                )
                label_to_id = {v: i for i, v in enumerate(label_list)}
        elif not is_regression:
            label_to_id = {v: i for i, v in enumerate(label_list)}
        else:
            label_to_id = None
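
        # Now that label_to_id is established, supply it to the model config.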
        config.label2id = label_to_id
        if config.label2id is not None:
            config.id2label = {id: label for label, id in label_to_id.items()}
        else:
            config.id2label = None
    else:
        label_to_id = config.label2id  # No training set, so load the label mapping from the model config

    if "validation" in datasets and config.label2id is not None:
        validation_label_list = datasets["validation"].unique("label")
        for val_label in validation_label_list:
            assert val_label in label_to_id, f"Label {val_label} is in the validation set but not the training set!"

    def preprocess_function(examples):
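        # Tokenize the texts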
        args = (
            (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
        )
        result = tokenizer(*args, max_length=max_seq_length, truncation=True)
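
        # Map string labels to integer IDs, leaving the conventional -1 "missing label" value untouched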
        if config.label2id is not None and "label" in examples:
            result["label"] = [(config.label2id[l] if l != -1 else -1) for l in examples["label"]]
        return result

    datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)

    with training_args.strategy.scope():
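        # Set seed before initializing the model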
        set_seed(training_args.seed)
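
        # Load the pretrained weights, either from the detected checkpoint or from the original model.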
        if checkpoint is None:
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
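
        # Convert the datasets to tf.data.Dataset objects. Auto-sharding is disabled below; it
        # targets file-based datasets and is unnecessary for the in-memory datasets used here.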
        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        num_replicas = training_args.strategy.num_replicas_in_sync

        tf_data = {}
        max_samples = {
            "train": data_args.max_train_samples,
            "validation": data_args.max_val_samples,
            "test": data_args.max_test_samples,
        }
        for key in ("train", "validation", "test"):
            if key not in datasets:
                tf_data[key] = None
                continue
            if (
                (key == "train" and not training_args.do_train)
                or (key == "validation" and not training_args.do_eval)
                or (key == "test" and not training_args.do_predict)
            ):
                tf_data[key] = None
                continue
            if key in ("train", "validation"):
                assert "label" in datasets[key].features, f"Missing labels from {key} data!"
            if key == "train":
                shuffle = True
                batch_size = training_args.per_device_train_batch_size * num_replicas
            else:
                shuffle = False
                batch_size = training_args.per_device_eval_batch_size * num_replicas
            samples_limit = max_samples[key]
            dataset = datasets[key]
            if samples_limit is not None:
                dataset = dataset.select(range(samples_limit))
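
            # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is
            # ready to use in training. This is the recommended way to prepare a Hugging Face dataset
            # for training with Keras. You can also use the lower-level dataset.to_tf_dataset() method,
            # but then you have to specify things like column names yourself, whereas they are inferred
            # automatically from the model input names by prepare_tf_dataset().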
            data = model.prepare_tf_dataset(
                dataset,
                shuffle=shuffle,
                batch_size=batch_size,
                tokenizer=tokenizer,
            )
            data = data.with_options(dataset_options)
            tf_data[key] = data
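
        # create_optimizer() returns an AdamW optimizer with a linear warmup-then-linear-decay
        # learning rate schedule. When not training, no optimizer is needed.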
        if training_args.do_train:
            num_train_steps = int(len(tf_data["train"]) * training_args.num_train_epochs)
            if training_args.warmup_steps > 0:
                num_warmup_steps = training_args.warmup_steps
            elif training_args.warmup_ratio > 0:
                num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
            else:
                num_warmup_steps = 0

            optimizer, schedule = create_optimizer(
                init_lr=training_args.learning_rate,
                num_train_steps=num_train_steps,
                num_warmup_steps=num_warmup_steps,
                adam_beta1=training_args.adam_beta1,
                adam_beta2=training_args.adam_beta2,
                adam_epsilon=training_args.adam_epsilon,
                weight_decay_rate=training_args.weight_decay,
                adam_global_clipnorm=training_args.max_grad_norm,
            )
        else:
            optimizer = None
        if is_regression:
            metrics = []
        else:
            metrics = ["accuracy"]
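
        # Transformers models compute a task-appropriate loss by default when labels are passed,
        # and use it for training unless you specify your own loss function in compile().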
        model.compile(optimizer=optimizer, metrics=metrics)
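
        # Work out the name of the Hub repo and the metadata for the auto-generated model card.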
        push_to_hub_model_id = training_args.push_to_hub_model_id
        model_name = model_args.model_name_or_path.split("/")[-1]
        if not push_to_hub_model_id:
            push_to_hub_model_id = f"{model_name}-finetuned-text-classification"

        model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}

        if training_args.push_to_hub:
            callbacks = [
                PushToHubCallback(
                    output_dir=training_args.output_dir,
                    hub_model_id=push_to_hub_model_id,
                    hub_token=training_args.push_to_hub_token,
                    tokenizer=tokenizer,
                    **model_card_kwargs,
                )
            ]
        else:
            callbacks = []
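
        # Train, then evaluate on the validation set (if provided) and log/save the metrics.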
        if tf_data["train"] is not None:
            model.fit(
                tf_data["train"],
                validation_data=tf_data["validation"],
                epochs=int(training_args.num_train_epochs),
                callbacks=callbacks,
            )
        if tf_data["validation"] is not None:
            logger.info("Computing metrics on validation data...")
            if is_regression:
                loss = model.evaluate(tf_data["validation"])
                logger.info(f"Eval loss: {loss:.5f}")
            else:
                loss, accuracy = model.evaluate(tf_data["validation"])
                logger.info(f"Eval loss: {loss:.5f}, Eval accuracy: {accuracy * 100:.4f}%")
            if training_args.output_dir is not None:
                output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
                eval_dict = {"eval_loss": loss}
                if not is_regression:
                    eval_dict["eval_accuracy"] = accuracy
                with open(output_eval_file, "w") as writer:
                    writer.write(json.dumps(eval_dict))
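
        # Write test-set predictions to a tab-separated file, one line per example.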
        if tf_data["test"] is not None:
            logger.info("Doing predictions on test dataset...")
            predictions = model.predict(tf_data["test"])["logits"]
            predicted_class = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
            output_test_file = os.path.join(training_args.output_dir, "test_results.txt")
            with open(output_test_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predicted_class):
                    if is_regression:
                        writer.write(f"{index}\t{item:3.3f}\n")
                    else:
                        item = config.id2label[item]
                        writer.write(f"{index}\t{item}\n")
            logger.info(f"Wrote predictions to {output_test_file}!")
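
        # If we're not pushing to the Hub, at least save a local copy of the fine-tuned model.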
        if training_args.output_dir is not None and not training_args.push_to_hub:
            model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()