#!/usr/bin/env python
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from functools import partial
from typing import Optional

import albumentations as A
import evaluate
import numpy as np
import torch
from albumentations.pytorch import ToTensorV2
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from torch import nn

import transformers
from transformers import (
    AutoConfig,
    AutoImageProcessor,
    AutoModelForSemanticSegmentation,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    default_data_collator,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
| """ Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation leveraging the Trainer API.""" | |
| logger = logging.getLogger(__name__) | |
| # Will error if the minimal version of Transformers is not installed. Remove at your own risks. | |
| check_min_version("4.52.0.dev0") | |
| require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") | |


def reduce_labels_transform(labels: np.ndarray, **kwargs) -> np.ndarray:
    """Map the `0` (background) label to 255 and reduce all other labels by 1.

    Example:
        Initial class labels: 0 - background; 1 - road; 2 - car;
        Transformed class labels: 255 - background; 0 - road; 1 - car;

    **kwargs are required to use this function with albumentations.
    """
    labels[labels == 0] = 255
    labels = labels - 1
    labels[labels == 254] = 255
    return labels
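
# Sketch of the expected behavior on a toy mask (illustrative only):
#   reduce_labels_transform(np.array([0, 1, 2])) -> array([255, 0, 1])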


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify
    them on the command line.
    """

    dataset_name: Optional[str] = field(
        default="segments/sidewalk-semantic",
        metadata={
            "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
        },
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    do_reduce_labels: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether or not to reduce all labels by 1 and replace background by 255."},
    )
    reduce_labels: Optional[bool] = field(
        default=False,
        metadata={
            "help": "Deprecated, use `do_reduce_labels` instead. Whether or not to reduce all labels by 1 and replace background by 255."
        },
    )

    def __post_init__(self):
        # Only datasets from the hub are supported for now (see the TODO about local folders in `main`).
        if self.dataset_name is None:
            raise ValueError("You must specify a dataset name from the hub.")
        if self.reduce_labels:
            self.do_reduce_labels = self.reduce_labels
            warnings.warn(
                "The `reduce_labels` argument is deprecated and will be removed in v4.45. Please use `do_reduce_labels` instead.",
                FutureWarning,
            )


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        default="nvidia/mit-b0",
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    token: str = field(
        default=None,
        metadata={
            "help": (
                "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
                "generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
            )
        },
    )
    trust_remote_code: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to trust the execution of code from datasets/models defined on the Hub."
                " This option should only be set to `True` for repositories you trust and in which you have read the"
                " code, as it will execute code present on the Hub on your local machine."
            )
        },
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_semantic_segmentation", model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Load dataset
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # TODO support datasets from local folders
    dataset = load_dataset(
        data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code
    )

    # Rename column names to standardized names (only "image" and "label" need to be present)
    if "pixel_values" in dataset["train"].column_names:
        dataset = dataset.rename_columns({"pixel_values": "image"})
    if "annotation" in dataset["train"].column_names:
        dataset = dataset.rename_columns({"annotation": "label"})

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = dataset["train"].train_test_split(data_args.train_val_split)
        dataset["train"] = split["train"]
        dataset["validation"] = split["test"]

    # Prepare label mappings.
    # We'll include these in the model's config to get human readable labels in the Inference API.
    if data_args.dataset_name == "scene_parse_150":
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
    else:
        repo_id = data_args.dataset_name
        filename = "id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset")))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: str(k) for k, v in id2label.items()}
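    # At this point `id2label` maps integer ids to class names (e.g. roughly {0: "wall", 1: "building", ...}
    # for ADE20K; names shown for illustration) and `label2id` is the inverse mapping with ids kept as strings.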

    # Load the mean IoU metric from the evaluate package
    metric = evaluate.load("mean_iou", cache_dir=model_args.cache_dir)

    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary mapping metric names to float values.
    def compute_metrics(eval_pred):
        logits, labels = eval_pred
        logits_tensor = torch.from_numpy(logits)
        # scale the logits to the size of the label
        logits_tensor = nn.functional.interpolate(
            logits_tensor,
            size=labels.shape[-2:],
            mode="bilinear",
            align_corners=False,
        ).argmax(dim=1)

        pred_labels = logits_tensor.detach().cpu().numpy()
        metrics = metric.compute(
            predictions=pred_labels,
            references=labels,
            num_labels=len(id2label),
            ignore_index=0,
            reduce_labels=image_processor.do_reduce_labels,
        )
        # add per category metrics as individual key-value pairs
        per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
        per_category_iou = metrics.pop("per_category_iou").tolist()

        metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
        metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})

        return metrics
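    # Besides the per-category entries added above, the "mean_iou" metric also returns aggregate scores
    # such as `mean_iou`, `mean_accuracy` and `overall_accuracy`, which end up in the evaluation logs.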

    config = AutoConfig.from_pretrained(
        model_args.config_name or model_args.model_name_or_path,
        label2id=label2id,
        id2label=id2label,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    model = AutoModelForSemanticSegmentation.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )
    image_processor = AutoImageProcessor.from_pretrained(
        model_args.image_processor_name or model_args.model_name_or_path,
        do_reduce_labels=data_args.do_reduce_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        token=model_args.token,
        trust_remote_code=model_args.trust_remote_code,
    )

    # Define transforms to be applied to each image and target.
    if "shortest_edge" in image_processor.size:
        # We set the target size to (shortest_edge, shortest_edge) here to ensure all images are batchable.
        height, width = image_processor.size["shortest_edge"], image_processor.size["shortest_edge"]
    else:
        height, width = image_processor.size["height"], image_processor.size["width"]

    train_transforms = A.Compose(
        [
            A.Lambda(
                name="reduce_labels",
                mask=reduce_labels_transform if data_args.do_reduce_labels else None,
                p=1.0,
            ),
            # pad image with 255, because it is ignored by loss
            A.PadIfNeeded(min_height=height, min_width=width, border_mode=0, value=255, p=1.0),
            A.RandomCrop(height=height, width=width, p=1.0),
            A.HorizontalFlip(p=0.5),
            A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
    )
    val_transforms = A.Compose(
        [
            A.Lambda(
                name="reduce_labels",
                mask=reduce_labels_transform if data_args.do_reduce_labels else None,
                p=1.0,
            ),
            A.Resize(height=height, width=width, p=1.0),
            A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0),
            ToTensorV2(),
        ]
    )

    def preprocess_batch(example_batch, transforms: A.Compose):
        pixel_values = []
        labels = []
        for image, target in zip(example_batch["image"], example_batch["label"]):
            transformed = transforms(image=np.array(image.convert("RGB")), mask=np.array(target))
            pixel_values.append(transformed["image"])
            labels.append(transformed["mask"])

        encoding = {}
        encoding["pixel_values"] = torch.stack(pixel_values).to(torch.float)
        encoding["labels"] = torch.stack(labels).to(torch.long)

        return encoding
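
    # Note: each preprocessed batch carries `pixel_values` of shape (batch_size, 3, height, width) as float
    # tensors and `labels` of shape (batch_size, height, width) as long tensors, which is the input format
    # for models loaded via AutoModelForSemanticSegmentation.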

    # The transform function passed to `set_transform` should take a single batch argument,
    # so we use `partial` to bind the Albumentations transforms.
    preprocess_train_batch_fn = partial(preprocess_batch, transforms=train_transforms)
    preprocess_val_batch_fn = partial(preprocess_batch, transforms=val_transforms)

    if training_args.do_train:
        if "train" not in dataset:
            raise ValueError("--do_train requires a train dataset")
        if data_args.max_train_samples is not None:
            dataset["train"] = (
                dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
            )
        # Set the training transforms
        dataset["train"].set_transform(preprocess_train_batch_fn)

    if training_args.do_eval:
        if "validation" not in dataset:
            raise ValueError("--do_eval requires a validation dataset")
        if data_args.max_eval_samples is not None:
            dataset["validation"] = (
                dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        dataset["validation"].set_transform(preprocess_val_batch_fn)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=dataset["train"] if training_args.do_train else None,
        eval_dataset=dataset["validation"] if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        processing_class=image_processor,
        data_collator=default_data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics("train", train_result.metrics)
        trainer.save_metrics("train", train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        "finetuned_from": model_args.model_name_or_path,
        "dataset": data_args.dataset_name,
        "tags": ["image-segmentation", "vision"],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()