Columns:
repo_id: string (15-86 chars)
file_path: string (28-180 chars)
content: string (1-1.75M chars)
__index_level_0__: int64 (always 0)
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh
# Sample script to finetune RAG using Ray for distributed retrieval.

# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"

# Create the custom knowledge base.
python use_own_knowledge_dataset.py \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --output_dir /DIR/SQUAD-KB

# Start a single-node Ray cluster.
ray start --head

# A sample finetuning run; you need to specify data_dir, output_dir and model_name_or_path.
# Run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options.
python finetune_rag.py \
    --data_dir /DIR/squad-training-data \
    --output_dir /DIR/model_checkpoints \
    --model_name_or_path facebook/rag-token-base \
    --model_type rag_token \
    --fp16 \
    --gpus 2 \
    --profile \
    --do_train \
    --end2end \
    --do_predict \
    --n_val -1 \
    --train_batch_size 4 \
    --eval_batch_size 1 \
    --max_source_length 128 \
    --max_target_length 25 \
    --val_max_target_length 25 \
    --test_max_target_length 25 \
    --label_smoothing 0.1 \
    --dropout 0.1 \
    --attention_dropout 0.1 \
    --weight_decay 0.001 \
    --adam_epsilon 1e-08 \
    --max_grad_norm 0.1 \
    --lr_scheduler polynomial \
    --learning_rate 3e-05 \
    --num_train_epochs 10 \
    --warmup_steps 500 \
    --gradient_accumulation_steps 8 \
    --distributed_retriever ray \
    --num_retrieval_workers 4 \
    --passages_path /DIR/SQUAD-KB/my_knowledge_dataset \
    --index_path /DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss \
    --index_name custom \
    --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --index_gpus 1 \
    --gpu_order [5,6,7,8,9,0,1,2,3,4] \
    --shard_dir ./test_dir/kb-shards \
    --indexing_freq 500

# Stop the Ray cluster.
ray stop

# This script was used to test the SQuAD data.
# Change the /DIR paths according to your preference.
# Please use the same device order when running:
# CUDA_VISIBLE_DEVICES=5,6,7,8,9,0,1,2,3,4 sh finetune_rag_ray_end2end.sh
0
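Before launching the long finetuning run above, a quick sanity check that the knowledge base loads and answers nearest-neighbour queries can save time. The sketch below is illustrative only: the /DIR paths mirror the placeholders in the script, and pairing the index with facebook/dpr-question_encoder-multiset-base is an assumption on my part, not something the script itself does.

# Minimal sanity check for the custom knowledge base built above (paths are placeholders).
import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

passages = load_from_disk("/DIR/SQUAD-KB/my_knowledge_dataset")
passages.load_faiss_index("embeddings", "/DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss")

# Assumption: the question encoder paired with facebook/dpr-ctx_encoder-multiset-base.
tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained("facebook/dpr-question_encoder-multiset-base")
encoder = DPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-multiset-base")

with torch.no_grad():
    question_emb = encoder(**tokenizer("Who wrote the SQuAD paper?", return_tensors="pt")).pooler_output

scores, retrieved = passages.get_nearest_examples("embeddings", question_emb[0].numpy(), k=5)
for title, text in zip(retrieved["title"], retrieved["text"]):
    print(title, "->", text[:80])

If the retrieved passages look reasonable, the same passages_path and index_path can be handed to finetune_rag.py as in the script above.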
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/eval_rag.py
""" Evaluation script for RAG models.""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def infer_model_type(model_name_or_path): if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def metric_max_over_ground_truths(metric_fn, prediction, ground_truths): return max(metric_fn(prediction, gt) for gt in ground_truths) def get_scores(args, preds_path, gold_data_path): hypos = [line.strip() for line in open(preds_path, "r").readlines()] answers = [] if args.gold_data_mode == "qa": data = pd.read_csv(gold_data_path, sep="\t", header=None) for answer_list in data[1]: ground_truths = ast.literal_eval(answer_list) answers.append(ground_truths) else: references = [line.strip() for line in open(gold_data_path, "r").readlines()] answers = [[reference] for reference in references] f1 = em = total = 0 for prediction, ground_truths in zip(hypos, answers): total += 1 em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths) f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths) em = 100.0 * em / total f1 = 100.0 * f1 / total logger.info(f"F1: {f1:.2f}") logger.info(f"EM: {em:.2f}") def get_precision_at_k(args, preds_path, gold_data_path): k = args.k hypos = [line.strip() for line in open(preds_path, "r").readlines()] references = [line.strip() for line in open(gold_data_path, "r").readlines()] em = total = 0 for hypo, reference in zip(hypos, references): hypo_provenance = set(hypo.split("\t")[:k]) ref_provenance = set(reference.split("\t")) total += 1 em += len(hypo_provenance & ref_provenance) / k em = 100.0 * em / total logger.info(f"Precision@{k}: {em: .2f}") def evaluate_batch_retrieval(args, rag_model, questions): def strip_title(title): if title.startswith('"'): title = title[1:] if title.endswith('"'): title = title[:-1] return title retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( questions, return_tensors="pt", padding=True, truncation=True, )["input_ids"].to(args.device) question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids) question_enc_pool_output = question_enc_outputs[0] result = rag_model.retriever( retriever_input_ids, question_enc_pool_output.cpu().detach().to(torch.float32).numpy(), prefix=rag_model.rag.generator.config.prefix, n_docs=rag_model.config.n_docs, return_tensors="pt", ) all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids) provenance_strings = [] for docs in all_docs: provenance = [strip_title(title) for title in docs["title"]] provenance_strings.append("\t".join(provenance)) return provenance_strings def evaluate_batch_e2e(args, rag_model, questions): with torch.no_grad(): inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( questions, return_tensors="pt", padding=True, truncation=True ) input_ids = inputs_dict.input_ids.to(args.device) attention_mask = inputs_dict.attention_mask.to(args.device) outputs = 
rag_model.generate( # rag_model overwrites generate input_ids, attention_mask=attention_mask, num_beams=args.num_beams, min_length=args.min_length, max_length=args.max_length, early_stopping=False, num_return_sequences=1, bad_words_ids=[[0, 0]], # BART likes to repeat BOS tokens, dont allow it to generate more than one ) answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True) if args.print_predictions: for q, a in zip(questions, answers): logger.info("Q: {} - A: {}".format(q, a)) return answers def get_args(): parser = argparse.ArgumentParser() parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token", "bart"], type=str, help=( "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the" " model_name_or_path" ), ) parser.add_argument( "--index_name", default=None, choices=["exact", "compressed", "legacy"], type=str, help="RAG model retriever type", ) parser.add_argument( "--index_path", default=None, type=str, help="Path to the retrieval index", ) parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs") parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained checkpoints or model identifier from huggingface.co/models", ) parser.add_argument( "--eval_mode", choices=["e2e", "retrieval"], default="e2e", type=str, help=( "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates" " precision@k." ), ) parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation") parser.add_argument( "--evaluation_set", default=None, type=str, required=True, help="Path to a file containing evaluation samples", ) parser.add_argument( "--gold_data_path", default=None, type=str, required=True, help="Path to a tab-separated file with gold samples", ) parser.add_argument( "--gold_data_mode", default="qa", type=str, choices=["qa", "ans"], help=( "Format of the gold data file" "qa - a single line in the following format: question [tab] answer_list" "ans - a single line of the gold file contains the expected answer string" ), ) parser.add_argument( "--predictions_path", type=str, default="predictions.txt", help="Name of the predictions file, to be stored in the checkpoints directory", ) parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument( "--eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.", ) parser.add_argument( "--recalculate", help="Recalculate predictions even if the prediction file exists", action="store_true", ) parser.add_argument( "--num_beams", default=4, type=int, help="Number of beams to be used when generating answers", ) parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers") parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers") parser.add_argument( "--print_predictions", action="store_true", help="If True, prints predictions while evaluating.", ) parser.add_argument( "--print_docs", action="store_true", help="If True, prints docs retried while generating.", ) args = parser.parse_args() args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") return args def main(args): model_kwargs = {} if args.model_type is None: args.model_type = infer_model_type(args.model_name_or_path) 
assert args.model_type is not None if args.model_type.startswith("rag"): model_class = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration model_kwargs["n_docs"] = args.n_docs if args.index_name is not None: model_kwargs["index_name"] = args.index_name if args.index_path is not None: model_kwargs["index_path"] = args.index_path else: model_class = BartForConditionalGeneration checkpoints = ( [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("Evaluate the following checkpoints: %s", checkpoints) score_fn = get_scores if args.eval_mode == "e2e" else get_precision_at_k evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == "e2e" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path) and (not args.recalculate): logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path)) score_fn(args, args.predictions_path, args.gold_data_path) continue logger.info("***** Running evaluation for {} *****".format(checkpoint)) logger.info(" Batch size = %d", args.eval_batch_size) logger.info(" Predictions will be stored under {}".format(args.predictions_path)) if args.model_type.startswith("rag"): retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs) model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs) model.retriever.init_retrieval() else: model = model_class.from_pretrained(checkpoint, **model_kwargs) model.to(args.device) with open(args.evaluation_set, "r") as eval_file, open(args.predictions_path, "w") as preds_file: questions = [] for line in tqdm(eval_file): questions.append(line.strip()) if len(questions) == args.eval_batch_size: answers = evaluate_batch_fn(args, model, questions) preds_file.write("\n".join(answers) + "\n") preds_file.flush() questions = [] if len(questions) > 0: answers = evaluate_batch_fn(args, model, questions) preds_file.write("\n".join(answers)) preds_file.flush() score_fn(args, args.predictions_path, args.gold_data_path) if __name__ == "__main__": args = get_args() main(args)
0
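eval_rag.py is normally driven from the command line; the sketch below shows one hypothetical end-to-end invocation by overriding sys.argv, which is how get_args() reads its options. Every /DIR path and the checkpoint name are placeholders, not values from the repository.

# Hypothetical driver for eval_rag.py; all paths below are placeholders.
import sys

from eval_rag import get_args, main

sys.argv = [
    "eval_rag.py",
    "--model_name_or_path", "/DIR/model_checkpoints/checkpoint10",  # placeholder checkpoint dir
    "--model_type", "rag_token",
    "--index_name", "exact",
    "--evaluation_set", "/DIR/squad-eval/questions.txt",   # one question per line
    "--gold_data_path", "/DIR/squad-eval/answers.txt",     # one answer per line
    "--gold_data_mode", "ans",
    "--predictions_path", "/DIR/model_checkpoints/predictions.txt",
    "--eval_mode", "e2e",
    "--num_beams", "4",
    "--print_predictions",
]
args = get_args()
main(args)  # writes predictions to predictions_path and logs F1 / EM via get_scores()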
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/callbacks_rag.py
import logging from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint from pytorch_lightning.utilities import rank_zero_only from utils_rag import save_json def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params logger = logging.getLogger(__name__) def get_checkpoint_callback(output_dir, metric): """Saves the best model by validation EM score.""" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "em": exp = "{val_avg_em:.4f}-{step_count}" elif metric == "loss": exp = "{val_avg_loss:.4f}-{step_count}" else: raise NotImplementedError( f"seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this" " function." ) checkpoint_callback = ModelCheckpoint( dirpath=output_dir, filename=exp, monitor=f"val_{metric}", mode="max", save_top_k=1, every_n_epochs=1, # works only with PL > 1.3 ) return checkpoint_callback def get_early_stopping_callback(metric, patience): return EarlyStopping( monitor=f"val_{metric}", # does this need avg? mode="min" if "loss" in metric else "max", patience=patience, verbose=True, ) class Seq2SeqLoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(lrs) @rank_zero_only def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content) @rank_zero_only def on_train_start(self, trainer, pl_module): try: npars = pl_module.model.model.num_parameters() except AttributeError: npars = pl_module.model.num_parameters() n_trainable_pars = count_trainable_parameters(pl_module) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) @rank_zero_only def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): save_json(pl_module.metrics, pl_module.metrics_save_path) return self._write_logs(trainer, pl_module, "test") @rank_zero_only def on_validation_end(self, trainer: pl.Trainer, pl_module): save_json(pl_module.metrics, pl_module.metrics_save_path) # Uncommenting this will save val generations # return self._write_logs(trainer, pl_module, "valid")
0
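In this project the callbacks above are wired up by generic_train in lightning_base.py; the minimal sketch below only shows how they could be attached to a plain PyTorch Lightning Trainer. The output directory, metric and patience values are illustrative.

# Standalone sketch: attaching the callbacks defined above to a Trainer.
import pytorch_lightning as pl

from callbacks_rag import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback

output_dir = "./model_checkpoints"  # placeholder
checkpoint_cb = get_checkpoint_callback(output_dir, metric="em")       # keeps the best val_em checkpoint
early_stop_cb = get_early_stopping_callback(metric="em", patience=3)   # stops after 3 stagnant validations

trainer = pl.Trainer(
    max_epochs=10,
    callbacks=[Seq2SeqLoggingCallback(), checkpoint_cb, early_stop_cb],
)
# trainer.fit(model)  # model would be a GenerativeQAModule from finetune_rag.py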
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/use_own_knowledge_dataset.py
import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser logger = logging.getLogger(__name__) torch.set_grad_enabled(False) device = "cuda" if torch.cuda.is_available() else "cpu" def split_text(text: str, n=100, character=" ") -> List[str]: """Split the text every ``n``-th occurrence of ``character``""" text = text.split(character) return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)] def split_documents(documents: dict) -> dict: """Split documents into passages""" titles, texts = [], [] for title, text in zip(documents["title"], documents["text"]): if text is not None: for passage in split_text(text): titles.append(title if title is not None else "") texts.append(passage) return {"title": titles, "text": texts} def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict: """Compute the DPR embeddings of document passages""" input_ids = ctx_tokenizer( documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt" )["input_ids"] embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def main( rag_example_args: "RagExampleArguments", processing_args: "ProcessingArguments", index_hnsw_args: "IndexHnswArguments", ): ###################################### logger.info("Step 1 - Create the dataset") ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file" # You can load a Dataset object this way dataset = load_dataset( "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc) # And compute the embeddings ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device) ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name) new_features = Features( {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))} ) # optional, save as float32 instead of float64 to save space dataset = dataset.map( partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer), batched=True, batch_size=processing_args.batch_size, features=new_features, ) # And finally save your dataset passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset") dataset.save_to_disk(passages_path) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("Step 2 - 
Index the dataset") ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT) dataset.add_faiss_index("embeddings", custom_index=index) # And save the index index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss") dataset.get_index("embeddings").save(index_path) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class RagExampleArguments: csv_path: str = field( default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"}, ) question: Optional[str] = field( default=None, metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."}, ) rag_model_name: str = field( default="facebook/rag-sequence-nq", metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"}, ) dpr_ctx_encoder_model_name: str = field( default="facebook/dpr-ctx_encoder-multiset-base", metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) }, ) output_dir: Optional[str] = field( default=str(Path(__file__).parent / "test_run" / "dummy-kb"), metadata={"help": "Path to a directory where the dataset passages and the index will be saved"}, ) @dataclass class ProcessingArguments: num_proc: Optional[int] = field( default=None, metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." }, ) batch_size: int = field( default=16, metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." }, ) @dataclass class IndexHnswArguments: d: int = field( default=768, metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."}, ) m: int = field( default=128, metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) }, ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
0
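Once the passages and the HNSW index are on disk, they can be plugged into a RAG model as a custom index. The sketch below assumes the script's default output_dir (test_run/dummy-kb, relative to the script) and uses the example question mentioned in RagExampleArguments; swap in your own paths for a real knowledge base.

# Sketch of querying the saved knowledge base with RAG (paths assume the default output_dir).
import os

from transformers import RagRetriever, RagSequenceForGeneration, RagTokenizer

output_dir = "test_run/dummy-kb"  # same default as RagExampleArguments.output_dir
retriever = RagRetriever.from_pretrained(
    "facebook/rag-sequence-nq",
    index_name="custom",
    passages_path=os.path.join(output_dir, "my_knowledge_dataset"),
    index_path=os.path.join(output_dir, "my_knowledge_dataset_hnsw_index.faiss"),
)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

inputs = tokenizer.question_encoder("What does Moses' rod turn into ?", return_tensors="pt")
generated = model.generate(input_ids=inputs["input_ids"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])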
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/utils_rag.py
import itertools import json import linecache import os import pickle import re import socket import string from collections import Counter from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List import git import torch from torch.utils.data import Dataset from transformers import BartTokenizer, RagTokenizer, T5Tokenizer def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"): extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {} tokenizer.padding_side = padding_side return tokenizer( [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, ) def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class Seq2SeqDataset(Dataset): def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ): super().__init__() self.src_file = Path(data_dir).joinpath(type_path + ".source") self.tgt_file = Path(data_dir).joinpath(type_path + ".target") self.src_lens = self.get_char_lens(self.src_file) self.max_source_length = max_source_length self.max_target_length = max_target_length assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" self.tokenizer = tokenizer self.prefix = prefix if n_obs is not None: self.src_lens = self.src_lens[:n_obs] self.src_lang = src_lang self.tgt_lang = tgt_lang def __len__(self): return len(self.src_lens) def __getitem__(self, index) -> Dict[str, torch.Tensor]: index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" # Need to add eos token manually for T5 if isinstance(self.tokenizer, T5Tokenizer): source_line += self.tokenizer.eos_token tgt_line += self.tokenizer.eos_token # Pad source and target to the right source_tokenizer = ( self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer ) target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right") target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right") source_ids = source_inputs["input_ids"].squeeze() target_ids = target_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "decoder_input_ids": target_ids, } @staticmethod def get_char_lens(data_file): return [len(x) for x in Path(data_file).open().readlines()] def collate_fn(self, batch) -> Dict[str, torch.Tensor]: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) target_ids = torch.stack([x["decoder_input_ids"] for x in batch]) tgt_pad_token_id = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, 
RagTokenizer) else self.tokenizer.pad_token_id ) src_pad_token_id = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) y = trim_batch(target_ids, tgt_pad_token_id) source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks) batch = { "input_ids": source_ids, "attention_mask": source_mask, "decoder_input_ids": y, } return batch logger = getLogger(__name__) def flatten_list(summary_ids: List[List]): return list(itertools.chain.from_iterable(summary_ids)) def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json")) def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, **json_dump_kwargs) def load_json(path): with open(path) as f: return json.load(f) def get_git_info(): repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } return repo_infos def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" def remove_articles(text): return re.sub(r"\b(a|an|the)\b", " ", text) def white_space_fix(text): return " ".join(text.split()) def remove_punc(text): exclude = set(string.punctuation) return "".join(ch for ch in text if ch not in exclude) def lower(text): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(s)))) def f1_score(prediction, ground_truth): prediction_tokens = normalize_answer(prediction).split() ground_truth_tokens = normalize_answer(ground_truth).split() common = Counter(prediction_tokens) & Counter(ground_truth_tokens) num_same = sum(common.values()) if num_same == 0: return 0 precision = 1.0 * num_same / len(prediction_tokens) recall = 1.0 * num_same / len(ground_truth_tokens) f1 = (2 * precision * recall) / (precision + recall) return f1 def exact_match_score(prediction, ground_truth): return normalize_answer(prediction) == normalize_answer(ground_truth) def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict: assert len(output_lns) == len(reference_lns) em = 0 for hypo, pred in zip(output_lns, reference_lns): em += exact_match_score(hypo, pred) if len(output_lns) > 0: em /= len(output_lns) return {"em": em} def is_rag_model(model_prefix): return model_prefix.startswith("rag") def set_extra_model_params(extra_params, hparams, config): equivalent_param = {p: p for p in extra_params} # T5 models don't have `dropout` param, they have `dropout_rate` instead equivalent_param["dropout"] = "dropout_rate" for p in extra_params: if getattr(hparams, p, None): if not hasattr(config, p) and not hasattr(config, equivalent_param[p]): logger.info("config doesn't have a `{}` attribute".format(p)) delattr(hparams, p) continue set_p = p if hasattr(config, p) else equivalent_param[p] setattr(config, set_p, getattr(hparams, p)) delattr(hparams, p) return hparams, config
0
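A small illustration of the answer-normalisation metrics defined above; the prediction and reference strings are made-up examples.

# Quick illustration of the SQuAD-style metrics from utils_rag.
from utils_rag import calculate_exact_match, exact_match_score, f1_score

prediction = "The Eiffel Tower."
reference = "eiffel tower"

print(exact_match_score(prediction, reference))          # True: case, punctuation and articles are stripped
print(round(f1_score("tower in Paris", reference), 2))   # token-overlap F1, here 0.4
print(calculate_exact_match(["the eiffel tower"], [reference]))  # {'em': 1.0}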
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/kb_encode_utils.py
import os
from functools import partial
from glob import glob

import faiss
from datasets import Features, Sequence, Value, concatenate_datasets, load_dataset, load_from_disk
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast


def split_text(text, n=100, character=" "):
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


def split_documents(documents):
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


def embed_update(ctx_encoder, total_processes, device, process_num, shard_dir, csv_path):
    kb_dataset = load_dataset(
        "csv", data_files=[csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    kb_dataset = kb_dataset.map(
        split_documents, batched=True, num_proc=1
    )  # if you want, you can load an already split csv.

    kb_list = [kb_dataset.shard(total_processes, i, contiguous=True) for i in range(total_processes)]
    data_shard = kb_list[process_num]

    arrow_folder = "data_" + str(process_num)
    passages_path = os.path.join(shard_dir, arrow_folder)

    context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")
    ctx_encoder = ctx_encoder.to(device=device)

    def embed(
        documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast, device
    ) -> dict:
        """Compute the DPR embeddings of document passages"""
        input_ids = ctx_tokenizer(
            documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
        )["input_ids"]
        embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
        return {"embeddings": embeddings.detach().cpu().numpy()}

    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space

    dataset = data_shard.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=context_tokenizer, device=device),
        batched=True,
        batch_size=16,
        features=new_features,
    )

    dataset.save_to_disk(passages_path)


def add_index(shard_dir, index_path):
    data_shard_list = []

    for shard_address in glob(str(shard_dir) + "/*/"):
        data_shard_list.append(load_from_disk(shard_address))

    concat = concatenate_datasets(data_shard_list)
    faiss.omp_set_num_threads(96)
    index = faiss.IndexHNSWFlat(768, 128, faiss.METRIC_INNER_PRODUCT)
    concat.add_faiss_index("embeddings", custom_index=index)

    concat.get_index("embeddings").save(
        index_path
    )  # since we load the index into memory, we can directly update the index on disk
0
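A standalone driver for the two helpers above, assuming you want to re-encode a knowledge base outside the Lightning training loop. The paths and the two-GPU device list are placeholders; finetune_rag.py normally launches these processes itself during training.

# Hypothetical standalone re-encoding run using embed_update / add_index.
import multiprocessing

from transformers import DPRContextEncoder

from kb_encode_utils import add_index, embed_update

if __name__ == "__main__":
    multiprocessing.set_start_method("spawn")  # matches finetune_rag.py

    csv_path = "/DIR/SQUAD-KB/squad-kb.csv"                         # placeholder tab-separated KB
    shard_dir = "./test_dir/kb-shards"                              # placeholder shard directory
    index_path = "/DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss"
    devices = ["cuda:0", "cuda:1"]                                  # one process per free GPU

    ctx_encoder = DPRContextEncoder.from_pretrained("facebook/dpr-ctx_encoder-multiset-base")

    processes = []
    for rank, device in enumerate(devices):
        p = multiprocessing.Process(
            target=embed_update,
            args=(ctx_encoder, len(devices), device, rank, shard_dir, csv_path),
        )
        p.start()
        processes.append(p)
    for p in processes:
        p.join()

    # Merge the per-process Arrow shards and write a single HNSW index to disk.
    add_index(shard_dir, index_path)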
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/finetune_rag.py
"""Finetuning script for RAG models. Adapted from examples.seq2seq.finetune.py""" import argparse import copy import json import logging import multiprocessing import os import random import shutil import sys import time from collections import defaultdict from pathlib import Path from typing import Any, Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch import torch.distributed as dist from datasets import concatenate_datasets, load_from_disk from torch.utils.data import DataLoader from transformers import ( AutoConfig, AutoTokenizer, BartForConditionalGeneration, BatchEncoding, DPRConfig, DPRContextEncoder, DPRContextEncoderTokenizerFast, RagConfig, RagSequenceForGeneration, RagTokenForGeneration, RagTokenizer, T5ForConditionalGeneration, ) from transformers import logging as transformers_logging from transformers.integrations import is_ray_available if is_ray_available(): import ray from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever from glob import glob from callbacks_rag import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from kb_encode_utils import add_index, embed_update from lightning_base import BaseTransformer, add_generic_args, generic_train from pynvml import nvmlDeviceGetCount, nvmlDeviceGetHandleByIndex, nvmlDeviceGetMemoryInfo, nvmlInit from utils_rag import ( Seq2SeqDataset, calculate_exact_match, get_git_info, is_rag_model, lmap, pickle_save, save_git_info, save_json, set_extra_model_params, ) logging.basicConfig(level=logging.INFO) logger = logging.getLogger(__name__) transformers_logging.set_verbosity_info() sys.path.insert(2, str(Path(__file__).resolve().parents[1])) isEmUpdateBusy = False isAddIndexBusy = False processes = [] threadHandle_index = None class AttrDict(dict): def __init__(self, *args, **kwargs): super(AttrDict, self).__init__(*args, **kwargs) self.__dict__ = self class GenerativeQAModule(BaseTransformer): mode = "generative_qa" loss_names = ["loss"] metric_names = ["em"] val_metric = "em" def __init__(self, hparams, **kwargs): # when loading from a pytorch lightning checkpoint, hparams are passed as dict if isinstance(hparams, dict): hparams = AttrDict(hparams) if hparams.model_type == "rag_sequence": self.model_class = RagSequenceForGeneration elif hparams.model_type == "rag_token": self.model_class = RagTokenForGeneration elif hparams.model_type == "bart": self.model_class = BartForConditionalGeneration else: self.model_class = T5ForConditionalGeneration self.is_rag_model = is_rag_model(hparams.model_type) config_class = RagConfig if self.is_rag_model else AutoConfig config = config_class.from_pretrained(hparams.model_name_or_path) # set retriever parameters config.index_name = hparams.index_name or config.index_name config.passages_path = hparams.passages_path or config.passages_path config.index_path = hparams.index_path or config.index_path config.use_dummy_dataset = hparams.use_dummy_dataset # set extra_model_params for generator configs and load_model extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "attention_dropout", "dropout") if self.is_rag_model: if hparams.prefix is not None: config.generator.prefix = hparams.prefix config.label_smoothing = hparams.label_smoothing hparams, config.generator = set_extra_model_params(extra_model_params, hparams, config.generator) if hparams.distributed_retriever == "ray": # The Ray retriever needs the handles to the retriever actors. 
retriever = RagRayDistributedRetriever.from_pretrained( hparams.model_name_or_path, hparams.actor_handles, config=config ) if hparams.end2end: ctx_encoder_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained( "facebook/dpr-ctx_encoder-multiset-base" ) retriever.set_ctx_encoder_tokenizer(ctx_encoder_tokenizer) else: logger.info("please use RAY as the distributed retrieval method") model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config, retriever=retriever) if hparams.end2end: ctx_encoder = DPRContextEncoder.from_pretrained(hparams.context_encoder_name) model.set_context_encoder_for_training(ctx_encoder) prefix = config.question_encoder.prefix else: if hparams.prefix is not None: config.prefix = hparams.prefix hparams, config = set_extra_model_params(extra_model_params, hparams, config) model = self.model_class.from_pretrained(hparams.model_name_or_path, config=config) prefix = config.prefix tokenizer = ( RagTokenizer.from_pretrained(hparams.model_name_or_path) if self.is_rag_model else AutoTokenizer.from_pretrained(hparams.model_name_or_path) ) self.config_dpr = DPRConfig.from_pretrained(hparams.context_encoder_name) self.custom_config = hparams self.context_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(hparams.context_encoder_name) super().__init__(hparams, config=config, tokenizer=tokenizer, model=model) save_git_info(self.hparams.output_dir) self.output_dir = Path(self.hparams.output_dir) self.dpr_ctx_check_dir = str(Path(self.hparams.output_dir)) + "/dpr_ctx_checkpoint" self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.dataset_kwargs: dict = { "data_dir": self.hparams.data_dir, "max_source_length": self.hparams.max_source_length, "prefix": prefix or "", } n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" self.hparams.git_sha = get_git_info()["repo_sha"] self.num_workers = hparams.num_workers self.distributed_port = self.hparams.distributed_port # For single GPU training, init_ddp_connection is not called. # So we need to initialize the retrievers here. 
if hparams.gpus <= 1: if hparams.distributed_retriever == "ray": self.model.retriever.init_retrieval() else: logger.info("please use RAY as the distributed retrieval method") self.distributed_retriever = hparams.distributed_retriever def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: source_ids, source_mask, target_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"] rag_kwargs = {} if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(target_ids) lm_labels = target_ids elif isinstance(self.model, BartForConditionalGeneration): decoder_input_ids = target_ids[:, :-1].contiguous() lm_labels = target_ids[:, 1:].clone() else: assert self.is_rag_model generator = self.model.rag.generator if isinstance(generator, T5ForConditionalGeneration): decoder_start_token_id = generator.config.decoder_start_token_id decoder_input_ids = ( torch.cat( [torch.tensor([[decoder_start_token_id]] * target_ids.shape[0]).to(target_ids), target_ids], dim=1, ) if target_ids.shape[0] < self.target_lens["train"] else generator._shift_right(target_ids) ) elif isinstance(generator, BartForConditionalGeneration): decoder_input_ids = target_ids lm_labels = decoder_input_ids rag_kwargs["reduce_loss"] = True assert decoder_input_ids is not None outputs = self( source_ids, attention_mask=source_mask, decoder_input_ids=decoder_input_ids, use_cache=False, labels=lm_labels, **rag_kwargs, ) loss = outputs["loss"] return (loss,) @property def pad(self) -> int: raise NotImplementedError("pad not implemented") def training_step(self, batch, batch_idx) -> Dict: global isEmUpdateBusy # use to check whether the entire embedding update process is finished or not global isAddIndexBusy # use to check whether the entire indexing process is finished or not global processes # use to keep threads embedding update processes global threadHandle_index # use to keep thread in embedding indexing processes if (self.trainer.global_rank == 0) and (self.custom_config.end2end): if (not batch_idx == 0) and (batch_idx % self.custom_config.indexing_freq == 0): free_gpu_list = [] nvmlInit() deviceCount = nvmlDeviceGetCount() my_list = json.loads(self.custom_config.gpu_order) for i in range(deviceCount): handle = nvmlDeviceGetHandleByIndex(i) info = nvmlDeviceGetMemoryInfo(handle) if info.used / 1e6 < 15: position = my_list.index(i) free_gpu_list.append("cuda:" + str(position)) if len(free_gpu_list) >= self.custom_config.index_gpus: has_free_gpus = True else: has_free_gpus = False if (not isEmUpdateBusy) and has_free_gpus: model_copy = type(self.model.rag.ctx_encoder)( self.config_dpr ) # get a new instance #this will be load in the CPU model_copy.load_state_dict(self.model.rag.ctx_encoder.state_dict()) # copy weights processes = [] if len(free_gpu_list) > self.custom_config.index_gpus: cuda_devices = random.sample(free_gpu_list, self.custom_config.index_gpus) else: cuda_devices = free_gpu_list num_processes = len(cuda_devices) for rank in range(num_processes): logger.info("Iniitializing embedding calculation process rank{}".format(rank)) device = cuda_devices[rank] p = multiprocessing.Process( target=embed_update, args=( copy.deepcopy(model_copy), num_processes, device, rank, self.custom_config.shard_dir, 
self.custom_config.csv_path, ), ) processes.append(p) for p in processes: p.start() isEmUpdateBusy = True if isEmUpdateBusy and (not isAddIndexBusy): index_process_list = [processes[k].is_alive() for k in range(self.custom_config.index_gpus)] if ( sum(index_process_list) == 0 ): # If entire list is false, we can say all embedding calculation process has finished logger.info("Start adding the index") threadHandle_index = multiprocessing.Process( target=add_index, args=( self.custom_config.shard_dir, self.config.index_path, ), ) threadHandle_index.start() isAddIndexBusy = True # check when index building has started if isAddIndexBusy: # check still the index_building process is happening if not threadHandle_index.is_alive(): logger.info("Merging the dataset shards") saved_dataset_shards = [] for address in glob(str(self.custom_config.shard_dir) + "/*/"): saved_dataset_shards.append(load_from_disk(address)) concat = concatenate_datasets(saved_dataset_shards) concat.save_to_disk(self.config.passages_path) # here we update the main passage file on the disk logger.info("done updating the dataset") # To Do (@Aaron) : Useful in the future dynamic memory implementation. # if you load the index from the disk make sure to update the index file here, otherwise it is ok to update the index file from the worker. # logger.info("then updating the index") # shutil.copy(self.custom_config.temp_index, self.config.idex_path) logger.info("Loading new passages and iniitalzing new index") self.trainer.model.module.module.model.rag.retriever.re_load() self.trainer.model.module.module.model.rag.retriever.init_retrieval() isEmUpdateBusy = False isAddIndexBusy = False self.trainer.strategy.barrier("barrier") loss_tensors = self._step(batch) logs = dict(zip(self.loss_names, loss_tensors)) # tokens per batch tgt_pad_token_id = ( self.tokenizer.generator.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) src_pad_token_id = ( self.tokenizer.question_encoder.pad_token_id if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer.pad_token_id ) logs["tpb"] = ( batch["input_ids"].ne(src_pad_token_id).sum() + batch["decoder_input_ids"].ne(tgt_pad_token_id).sum() ) self.log("loss", loss_tensors[0]) return loss_tensors[0] def validation_step(self, batch, batch_idx) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] gen_metrics = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] } metrics_tensor: torch.FloatTensor = torch.tensor(gen_metrics[self.val_metric]).type_as(loss) gen_metrics.update({k: v.item() for k, v in losses.items()}) # fix for https://github.com/PyTorchLightning/pytorch-lightning/issues/2424 if dist.is_initialized(): dist.all_reduce(metrics_tensor, op=dist.ReduceOp.SUM) metrics_tensor = metrics_tensor / dist.get_world_size() gen_metrics.update({self.val_metric: metrics_tensor.item()}) losses.update(gen_metrics) metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} metrics["step_count"] = self.step_count self.save_metrics(metrics, prefix) # writes to self.metrics_save_path log_dict = { f"{prefix}_avg_em": metrics[f"{prefix}_avg_em"], "step_count": metrics["step_count"], f"{prefix}_avg_loss": metrics[f"{prefix}_avg_loss"], f"{prefix}_loss": loss, f"{prefix}_em": metrics_tensor, } self.log_dict(log_dict) def save_metrics(self, 
latest_metrics, type_path) -> None: self.metrics[type_path].append(latest_metrics) save_json(self.metrics, self.metrics_save_path) def calc_generative_metrics(self, preds, target) -> Dict: return calculate_exact_match(preds, target) def _generative_step(self, batch: dict) -> dict: start_time = time.time() batch = BatchEncoding(batch).to(device=self.model.device) generated_ids = self.model.generate( batch["input_ids"], attention_mask=batch["attention_mask"], do_deduplication=False, # rag specific parameter use_cache=True, min_length=1, max_length=self.target_lens["val"], ) gen_time = (time.time() - start_time) / batch["input_ids"].shape[0] preds: List[str] = self.ids_to_clean_text(generated_ids) target: List[str] = self.ids_to_clean_text(batch["decoder_input_ids"]) # print(preds,target) loss_tensors = self._step(batch) base_metrics = dict(zip(self.loss_names, loss_tensors)) gen_metrics: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **gen_metrics) return base_metrics def test_step(self, batch, batch_idx): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> Seq2SeqDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = Seq2SeqDataset( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) dataloader = DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, ) return dataloader def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("checkpoint{}".format(self.step_count)) self.model.config.save_step = self.step_count # self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) if self.custom_config.end2end: modified_state_dict = self.model.state_dict() for key in self.model.state_dict().keys(): if key.split(".")[1] == "ctx_encoder": del modified_state_dict[key] self.model.save_pretrained(save_directory=save_path, state_dict=modified_state_dict) save_path_dpr = os.path.join(self.dpr_ctx_check_dir, "checkpoint{}".format(self.step_count)) self.model.rag.ctx_encoder.save_pretrained(save_path_dpr) self.context_tokenizer.save_pretrained(save_path_dpr) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--val_max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--test_max_target_length", default=25, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument( "--prefix", type=str, default=None, help="Prefix added at the beginning of each text, typically used with T5-based models.", ) parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help=( "-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So" " val_check_interval will effect it." ), ) parser.add_argument( "--distributed-port", type=int, default=-1, required=False, help="Port number for distributed training." ) parser.add_argument( "--model_type", choices=["rag_sequence", "rag_token", "bart", "t5"], type=str, help=( "RAG model type: sequence or token, if none specified, the type is inferred from the" " model_name_or_path" ), ) parser.add_argument( "--context_encoder_name", default="facebook/dpr-ctx_encoder-multiset-base", type=str, help="Name of the pre-trained context encoder checkpoint from the DPR", ) parser.add_argument( "--csv_path", default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"), type=str, help="path of the raw KB csv", ) parser.add_argument("--end2end", action="store_true", help="whether to train the system end2end or not") parser.add_argument("--index_gpus", type=int, help="how many GPUs used in re-encoding process") parser.add_argument( "--shard_dir", type=str, default=str(Path(__file__).parent / "test_run" / "kb-shards"), help="directory used to keep temporary shards during the re-encode process", ) parser.add_argument( "--gpu_order", type=str, help=( "order of the GPU used during the fine-tuning. Used to finding free GPUs during the re-encode" " process. I do not have many GPUs :)" ), ) parser.add_argument("--indexing_freq", type=int, help="frequency of re-encode process") return parser @staticmethod def add_retriever_specific_args(parser): parser.add_argument( "--index_name", type=str, default=None, help=( "Name of the index to use: 'hf' for a canonical dataset from the datasets library (default), 'custom'" " for a local index, or 'legacy' for the orignal one)" ), ) parser.add_argument( "--passages_path", type=str, default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset"), help=( "Path to the dataset of passages for custom index. 
More info about custom indexes in the RagRetriever" " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) parser.add_argument( "--index_path", type=str, default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset_hnsw_index.faiss"), help=( "Path to the faiss index for custom index. More info about custom indexes in the RagRetriever" " documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) parser.add_argument( "--distributed_retriever", choices=["ray", "pytorch"], type=str, default="ray", help=( "What implementation to use for distributed retriever? If " "pytorch is selected, the index is loaded on training " "worker 0, and torch.distributed is used to handle " "communication between training worker 0, and the other " "training workers. If ray is selected, the Ray library is " "used to create load the index on separate processes, " "and Ray handles the communication between the training " "workers and the retrieval actors." ), ) parser.add_argument( "--use_dummy_dataset", type=bool, default=False, help=( "Whether to use the dummy version of the dataset index. More info about custom indexes in the" " RagRetriever documentation as well as in `examples/rag/use_own_knowledge_dataset.py`" ), ) return parser @staticmethod def add_ray_specific_args(parser): # Ray cluster address. parser.add_argument( "--ray-address", default="auto", type=str, help=( "The address of the Ray cluster to connect to. If not " "specified, Ray will attempt to automatically detect the " "cluster. Has no effect if pytorch is used as the distributed " "retriever." ), ) parser.add_argument( "--num_retrieval_workers", type=int, default=1, help=( "The number of retrieval actors to use when Ray is selected" "for the distributed retriever. Has no effect when " "distributed_retriever is set to pytorch." ), ) return parser def main(args=None, model=None) -> GenerativeQAModule: parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) parser = GenerativeQAModule.add_retriever_specific_args(parser) args = args or parser.parse_args() Path(args.output_dir).mkdir(exist_ok=True) Path(args.output_dir + "/dpr_ctx_checkpoint").mkdir( exist_ok=True ) # save dpr_context encoder seprately for the future use print(args.shard_dir) if os.path.exists(args.shard_dir): # we do not need previous kb shards used in dataset re-conding and re-indexing shutil.rmtree(args.shard_dir) Path(args.shard_dir).mkdir(exist_ok=True) if os.path.exists( args.cache_dir ): # we do not need previous cache files used in dataset re-conding and re-indexing shutil.rmtree(args.cache_dir) Path(args.cache_dir).mkdir(exist_ok=True) named_actors = [] if args.distributed_retriever == "ray" and args.gpus > 1: if not is_ray_available(): raise RuntimeError("Please install Ray to use the Ray distributed retriever.") # Connect to an existing Ray cluster. try: ray.init(address=args.ray_address, namespace="rag") except (ConnectionError, ValueError): logger.warning( "Connection to Ray cluster failed. Make sure a Ray" "cluster is running by either using Ray's cluster " "launcher (`ray up`) or by manually starting Ray on " "each node via `ray start --head` for the head node " "and `ray start --address='<ip address>:6379'` for " "additional nodes. See " "https://docs.ray.io/en/master/cluster/index.html " "for more info." ) raise # Create Ray actors only for rank 0. 
if ("LOCAL_RANK" not in os.environ or os.environ["LOCAL_RANK"] == 0) and ( "NODE_RANK" not in os.environ or os.environ["NODE_RANK"] == 0 ): remote_cls = ray.remote(RayRetriever) named_actors = [ remote_cls.options(name="retrieval_worker_{}".format(i)).remote() for i in range(args.num_retrieval_workers) ] else: logger.info( "Getting named actors for NODE_RANK {}, LOCAL_RANK {}".format( os.environ["NODE_RANK"], os.environ["LOCAL_RANK"] ) ) named_actors = [ray.get_actor("retrieval_worker_{}".format(i)) for i in range(args.num_retrieval_workers)] args.actor_handles = named_actors assert args.actor_handles == named_actors if model is None: model: GenerativeQAModule = GenerativeQAModule(args) dataset = Path(args.data_dir).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): training_logger = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger project = os.environ.get("WANDB_PROJECT", dataset) training_logger = WandbLogger(name=model.output_dir.name, project=project) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger training_logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") es_callback = ( get_early_stopping_callback(model.val_metric, args.early_stopping_patience) if args.early_stopping_patience >= 0 else False ) trainer: pl.Trainer = generic_train( model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(args.output_dir, model.val_metric), early_stopping_callback=es_callback, logger=training_logger, profiler=pl.profiler.AdvancedProfiler() if args.profile else None, ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") if not args.do_predict: return model # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": multiprocessing.set_start_method("spawn") parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = GenerativeQAModule.add_model_specific_args(parser, os.getcwd()) parser = GenerativeQAModule.add_retriever_specific_args(parser) parser = GenerativeQAModule.add_ray_specific_args(parser) # Pytorch Lightning Profiler parser.add_argument( "--profile", action="store_true", help="If True, use pytorch_lightning.profiler.AdvancedProfiler to profile the Trainer.", ) args = parser.parse_args() main(args)
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/lightning_base.py
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version logger = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def 
get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) ], # check this named paramters "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, stage): if stage == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=str(Path(__file__).parent / "test_run" / "cache"), type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( 
"--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class InitCallback(pl.Callback): # this process can also be done with PL ddp plugging. # But still it is experimental (check original RAG, I updated that with pluggin (shamanez)) def on_sanity_check_start(self, trainer, pl_module): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class CheckParamCallback(pl.Callback): # check whether new added model paramters are differentiable def on_after_backward(self, trainer, pl_module): # print(pl_module.model.rag) for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(name) class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=str(Path(__file__).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=str(Path(__file__).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. 
Should contain train.source, train.target, val.source, val.target, test.source and test.target files (see test_run/dummy-train-data for an example).",
    )


def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")

    return trainer
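
# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module):
# the task scripts in this project typically compose an argument parser from
# `add_generic_args` plus a module's `add_model_specific_args`, then hand the
# parsed namespace to the module constructor and `generic_train`. The minimal
# parse below only demonstrates that wiring; `--model_name_or_path` is the
# single required flag and the checkpoint name used here is just an example.
if __name__ == "__main__":
    sketch_parser = argparse.ArgumentParser()
    add_generic_args(sketch_parser, os.getcwd())
    BaseTransformer.add_model_specific_args(sketch_parser, os.getcwd())
    sketch_args = sketch_parser.parse_args(["--model_name_or_path", "facebook/rag-token-base"])
    print(sketch_args.output_dir, sketch_args.learning_rate, sketch_args.lr_scheduler)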
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/test_rag_new_features.sh
export PYTHONPATH="../":"${PYTHONPATH}" python use_own_knowledge_dataset.py ray start --head python finetune_rag.py \ --model_name_or_path facebook/rag-token-base \ --model_type rag_token \ --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \ --fp16 \ --gpus 1 \ --profile \ --end2end \ --index_name custom ray stop
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/test_finetune.sh
# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"

# Creates the custom knowledge base
python use_own_knowledge_dataset.py

# Start a single-node Ray cluster.
ray start --head

# A sample fine-tuning run; you need to specify data_dir, output_dir and model_name_or_path
# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options

python finetune_rag.py \
    --model_name_or_path facebook/rag-token-base \
    --model_type rag_token \
    --fp16 \
    --gpus 2 \
    --profile \
    --do_train \
    --end2end \
    --do_predict \
    --n_val -1 \
    --train_batch_size 1 \
    --eval_batch_size 1 \
    --max_source_length 128 \
    --max_target_length 25 \
    --val_max_target_length 25 \
    --test_max_target_length 25 \
    --label_smoothing 0.1 \
    --dropout 0.1 \
    --attention_dropout 0.1 \
    --weight_decay 0.001 \
    --adam_epsilon 1e-08 \
    --max_grad_norm 0.1 \
    --lr_scheduler polynomial \
    --learning_rate 3e-05 \
    --num_train_epochs 10 \
    --warmup_steps 500 \
    --gradient_accumulation_steps 1 \
    --distributed_retriever ray \
    --num_retrieval_workers 4 \
    --index_name custom \
    --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \
    --index_gpus 2 \
    --gpu_order [2,3,4,5,6,7,8,9,0,1] \
    --indexing_freq 5

# Stop the Ray cluster.
ray stop

# Run as: CUDA_VISIBLE_DEVICES=2,3,4,5,6,7,8,9,0,1 sh ./test_run/test_finetune.sh
# Make sure --gpu_order matches the CUDA_VISIBLE_DEVICES order.
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-kb/my_knowledge_dataset.csv
Aaron Aaron Aaron ( or ; "Ahärôn") is a prophet, high priest, and the brother of Moses in the Abrahamic religions. Knowledge of Aaron, along with his brother Moses, comes exclusively from religious texts, such as the Bible and Quran. The Hebrew Bible relates that, unlike Moses, who grew up in the Egyptian royal court, Aaron and his elder sister Miriam remained with their kinsmen in the eastern border-land of Egypt (Goshen). When Moses first confronted the Egyptian king about the Israelites, Aaron served as his brother's spokesman ("prophet") to the Pharaoh. Part of the Law (Torah) that Moses received from God at Sinai granted Aaron the priesthood for himself and his male descendants, and he became the first High Priest of the Israelites. Aaron died before the Israelites crossed the North Jordan river and he was buried on Mount Hor (Numbers 33:39; Deuteronomy 10:6 says he died and was buried at Moserah). Aaron is also mentioned in the New Testament of the Bible. According to the Book of Exodus, Aaron first functioned as Moses' assistant. Because Moses complained that he could not speak well, God appointed Aaron as Moses' "prophet" (Exodus 4:10-17; 7:1). At the command of Moses, he let his rod turn into a snake. Then he stretched out his rod in order to bring on the first three plagues. After that, Moses tended to act and speak for himself. During the journey in the wilderness, Aaron was not always prominent or active. At the battle with Amalek, he was chosen with Hur to support the hand of Moses that held the "rod of God". When the revelation was given to Moses at biblical Mount Sinai, he headed the elders of Israel who accompanied Moses on the way to the summit. "Pokémon" Pokémon , also known as in Japan, is a media franchise managed by The Pokémon Company, a Japanese consortium between Nintendo, Game Freak, and Creatures. The franchise copyright is shared by all three companies, but Nintendo is the sole owner of the trademark. The franchise was created by Satoshi Tajiri in 1995, and is centered on fictional creatures called "Pokémon", which humans, known as Pokémon Trainers, catch and train to battle each other for sport. The English slogan for the franchise is "Gotta Catch 'Em All". Works within the franchise are set in the Pokémon universe. The franchise began as "Pokémon Red" and "Green" (released outside of Japan as "Pokémon Red" and "Blue"), a pair of video games for the original Game Boy that were developed by Game Freak and published by Nintendo in February 1996. "Pokémon" has since gone on to become the highest-grossing media franchise of all time, with over in revenue up until March 2017. The original video game series is the second best-selling video game franchise (behind Nintendo's "Mario" franchise) with more than 300million copies sold and over 800million mobile downloads. In addition, the "Pokémon" franchise includes the world's top-selling toy brand, the top-selling trading card game with over 25.7billion cards sold, an anime television series that has become the most successful video game adaptation with over 20 seasons and 1,000 episodes in 124 countries, as well as an anime film series, a , books, manga comics, music, and merchandise. The franchise is also represented in other Nintendo media, such as the "Super Smash Bros." series. In November 2005, 4Kids Entertainment, which had managed the non-game related licensing of "Pokémon", announced that it had agreed not to renew the "Pokémon" representation agreement. 
The Pokémon Company International oversees all "Pokémon" licensing outside Asia.
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.target
to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.source
What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ?
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/val.target
to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.target
to a snake Moses' assistant Egyptian royal court let his rod turn in to a snake The Pokémon Company Nintendo world's top-selling toy brand, the top-selling trading card game over 20 seasons
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/train.source
What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ? What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ? What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ? What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ? What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ? What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ?
0
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run
hf_public_repos/transformers/examples/research_projects/rag-end2end-retriever/test_run/dummy-train-data/test.source
What does Moses' rod turn into ? Who is Aron? Where did Moses grow up ? What happens at the command of the Moses ? Who manages the Pokémon ? Who owned the Pokémon trademark ? What else include in Pokémon franchise ? How many seasons in Pokémon animme series ?
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/README.md
# Performer fine-tuning

Example authors: @TevenLeScao, @Patrickvonplaten
Paper authors: Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, David Belanger, Lucy Colwell, Adrian Weller

## Requirements

`datasets`, `flax` and `jax`. `wandb` integration is built-in if you want to use it.

## Examples

`sanity_script.sh` will launch performer fine-tuning from the bert-base-cased checkpoint on the Simple Wikipedia dataset (a small, easy-language English Wikipedia) from `datasets`.
`full_script.sh` will launch performer fine-tuning from the bert-large-cased checkpoint on the English Wikipedia dataset from `datasets`.

Here are a few key arguments (an example invocation combining them follows the list):
- Remove the `--performer` argument to use a standard Bert model.
- Add `--reinitialize` to start from a blank model rather than a Bert checkpoint.
- You may change the Bert size by passing a different [checkpoint](https://huggingface.co/transformers/pretrained_models.html) to the `--model_name_or_path` argument.
- Passing your user name to the `--wandb_user_name` argument will enable Weights & Biases logging.
- You can choose a dataset with `--dataset_name` and `--dataset_config_name`. Our [viewer](https://huggingface.co/datasets/viewer/) will help you find what you need.
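
For reference, these options can be combined into an invocation along the following lines (modeled on `full_script.sh`; the dataset config, checkpoint and hyperparameters here are illustrative placeholders rather than recommended settings):

```bash
TOKENIZERS_PARALLELISM=true python run_mlm_performer.py \
    --output_dir experiments \
    --dataset_name wikipedia \
    --dataset_config_name 20200501.simple \
    --model_name_or_path bert-base-cased \
    --tokenizer_name bert-base-cased \
    --do_train \
    --overwrite_output_dir \
    --per_device_train_batch_size 4 \
    --learning_rate 5e-4 \
    --warmup_steps 100 \
    --num_train_epochs 3 \
    --performer \
    --wandb_user_name <your-wandb-username>
```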
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/modeling_flax_performer_utils.py
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ IMPORTANT: This code was copied from https://github.com/google-research/google-research/blob/master/performer/fast_self_attention/fast_self_attention.py on 6/11/2020. This is very new code, so it might be prone to change soon -> make sure to check the original code and update accordingly Core Fast Attention Module for Flax. Implementation of the approximate fast softmax and generalized attention mechanism leveraging structured random feature maps [RFM] techniques and low rank decomposition of the attention matrix. """ # pylint: disable=invalid-name, missing-function-docstring, line-too-long import abc import functools from collections.abc import Iterable # pylint: disable=g-importing-member import jax import jax.numpy as jnp import numpy as onp from absl import logging from jax import lax, random def nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True, eps=0.0001 ): """ Constructs nonnegative kernel features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter is_query: predicate indicating whether input data corresponds to queries or keys normalize_data: predicate indicating whether data should be normalized, eps: numerical stabilizer Returns: Random features for fast softmax attention. """ del attention_dims_t if normalize_data: # We have e^{qk^T/sqrt{d}} = e^{q_norm k_norm^T}, where # w_norm = w * data_normalizer for w in {q,k}. 
data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) if is_query: last_dims_t = (len(data_dash.shape) - 1,) data_dash = ratio * ( jnp.exp(data_dash - diag_data - jnp.max(data_dash, axis=last_dims_t, keepdims=True)) + eps ) else: data_dash = ratio * (jnp.exp(data_dash - diag_data - jnp.max(data_dash)) + eps) return data_dash def sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data=True ): """ Constructs kernel sin-cos features for fast softmax attention Args: data: input for which features are computes projection_matrix: random matrix used to compute features attention_dims_t: tuple of attention dimensions batch_dims_t: tuple of batch dimensions precision: precision parameter normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast softmax attention. """ if normalize_data: # We have: exp(qk^T/sqrt{d}) = exp(|q|^2/2sqrt{d}) * exp(|k|^2/2sqrt{d}) * # exp(-(|q*c-k*c|^2)/2), where c = 1.0 / sqrt{sqrt{d}}. data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 ratio = 1.0 / jnp.sqrt(projection_matrix.shape[0]) data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_dash_cos = ratio * jnp.cos(data_dash) data_dash_sin = ratio * jnp.sin(data_dash) data_dash = jnp.concatenate((data_dash_cos, data_dash_sin), axis=-1) # Constructing D_data and data^{'} diag_data = jnp.square(data) diag_data = jnp.sum(diag_data, axis=data.ndim - 1) diag_data = (diag_data / 2.0) * data_normalizer * data_normalizer diag_data = jnp.expand_dims(diag_data, axis=data.ndim - 1) # Additional renormalization for numerical stability data_renormalizer = jnp.max(diag_data, attention_dims_t, keepdims=True) diag_data -= data_renormalizer diag_data = jnp.exp(diag_data) data_prime = data_dash * diag_data return data_prime def generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ): """ Constructs kernel features for fast generalized attention Args: data: input for which features are computes projection_matrix: matrix used to compute features batch_dims_t: tuple of batch dimensions precision: precision parameter kernel_fn: kernel function used kernel_epsilon: additive positive term added to every feature for numerical stability normalize_data: predicate indicating whether data should be normalized Returns: Random features for fast generalized attention. 
""" if normalize_data: data_normalizer = 1.0 / (jnp.sqrt(jnp.sqrt(data.shape[-1]))) else: data_normalizer = 1.0 if projection_matrix is None: return kernel_fn(data_normalizer * data) + kernel_epsilon else: data_mod_shape = data.shape[0 : len(batch_dims_t)] + projection_matrix.shape data_thick_random_matrix = jnp.zeros(data_mod_shape) + projection_matrix data_dash = lax.dot_general( data_normalizer * data, data_thick_random_matrix, (((data.ndim - 1,), (data_thick_random_matrix.ndim - 1,)), (batch_dims_t, batch_dims_t)), precision=precision, ) data_prime = kernel_fn(data_dash) + kernel_epsilon return data_prime def make_fast_softmax_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.000001, nb_features=256, ortho_features=True, ortho_scaling=0.0, redraw_features=True, unidirectional=False, nonnegative_features=True, lax_scan_unroll=1, ): """Construct a fast softmax attention method.""" logging.info( "Fast softmax attention: %s features and orthogonal=%s, renormalize=%s", nb_features, ortho_features, renormalize_attention, ) if ortho_features: matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=ortho_scaling) else: matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) if nonnegative_features: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): return nonnegative_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data, numerical_stabilizer, ) else: def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=True ): del is_query return sincos_softmax_kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn def make_fast_generalized_attention( qkv_dim, renormalize_attention=True, numerical_stabilizer=0.0, nb_features=256, features_type="deterministic", kernel_fn=jax.nn.relu, kernel_epsilon=0.001, redraw_features=False, unidirectional=False, lax_scan_unroll=1, ): """Construct a fast generalized attention menthod.""" logging.info("Fast generalized attention.: %s features and renormalize=%s", nb_features, renormalize_attention) if features_type == "ortho": matrix_creator = functools.partial(GaussianOrthogonalRandomMatrix, nb_features, qkv_dim, scaling=False) elif features_type == "iid": matrix_creator = functools.partial(GaussianUnstructuredRandomMatrix, nb_features, qkv_dim) elif features_type == "deterministic": matrix_creator = None else: raise ValueError("Unknown feature value type") def kernel_feature_creator( data, projection_matrix, attention_dims_t, batch_dims_t, precision, is_query, normalize_data=False ): del attention_dims_t del is_query return generalized_kernel_feature_creator( data, projection_matrix, batch_dims_t, precision, kernel_fn, kernel_epsilon, normalize_data ) attention_fn = FastAttentionviaLowRankDecomposition( matrix_creator, kernel_feature_creator, renormalize_attention=renormalize_attention, numerical_stabilizer=numerical_stabilizer, redraw_features=redraw_features, unidirectional=unidirectional, 
lax_scan_unroll=lax_scan_unroll, ).dot_product_attention return attention_fn class RandomMatrix(object): r""" Abstract class providing a method for constructing 2D random arrays. Class is responsible for constructing 2D random arrays. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def get_2d_array(self): raise NotImplementedError("Abstract method") class GaussianUnstructuredRandomMatrix(RandomMatrix): def __init__(self, nb_rows, nb_columns, key): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key def get_2d_array(self): return random.normal(self.key, (self.nb_rows, self.nb_columns)) class GaussianOrthogonalRandomMatrix(RandomMatrix): r""" Class providing a method to create Gaussian orthogonal matrix. Class is responsible for constructing 2D Gaussian orthogonal arrays. """ def __init__(self, nb_rows, nb_columns, key, scaling=0): self.nb_rows = nb_rows self.nb_columns = nb_columns self.key = key self.scaling = scaling def get_2d_array(self): nb_full_blocks = int(self.nb_rows / self.nb_columns) block_list = [] rng = self.key for _ in range(nb_full_blocks): rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q) remaining_rows = self.nb_rows - nb_full_blocks * self.nb_columns if remaining_rows > 0: rng, rng_input = jax.random.split(rng) unstructured_block = random.normal(rng_input, (self.nb_columns, self.nb_columns)) q, _ = jnp.linalg.qr(unstructured_block) q = jnp.transpose(q) block_list.append(q[0:remaining_rows]) final_matrix = jnp.vstack(block_list) if self.scaling == 0: multiplier = jnp.linalg.norm(random.normal(self.key, (self.nb_rows, self.nb_columns)), axis=1) elif self.scaling == 1: multiplier = jnp.sqrt(float(self.nb_columns)) * jnp.ones((self.nb_rows)) else: raise ValueError("Scaling must be one of {0, 1}. Was %s" % self._scaling) return jnp.matmul(jnp.diag(multiplier), final_matrix) class FastAttention(object): r""" Abstract class providing a method for fast attention. Class is responsible for providing a method <dot_product_attention> for fast approximate attention. """ __metaclass__ = abc.ABCMeta @abc.abstractmethod def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): """ Computes dot-product attention given query, key, and value. This is the core function for applying fast approximate dot-product attention. It calculates the attention weights given query and key and combines the values using the attention weights. This function supports multi-dimensional inputs Args: query: queries for calculating attention with shape of [batch_size, dim1, dim2, ..., dimN, num_heads, mem_channels]. key: keys for calculating attention with shape of [batch_size, dim1, dim2, ..., dimN, num_heads, mem_channels]. value: values to be used in attention with shape of [batch_size, dim1, dim2,..., dimN, num_heads, value_channels]. dtype: the dtype of the computation (default: float32) bias: bias for the attention weights. This can be used for incorporating autoregressive mask, padding mask, proximity bias. axis: axises over which the attention is applied. broadcast_dropout: bool: use a broadcasted dropout along batch dims. dropout_rng: JAX PRNGKey: to be used for dropout. dropout_rate: dropout rate. deterministic: bool, deterministic or not (to apply dropout). 
precision: numerical precision of the computation see `jax.lax.Precision` for details Returns: Output of shape [bs, dim1, dim2, ..., dimN,, num_heads, value_channels]. """ raise NotImplementedError("Abstract method") def _numerator(z_slice_shape, precision, unroll=1): def fwd(qs, ks, vs): def body(p, qkv): (q, k, v) = qkv p += jnp.einsum("...m,...d->...md", k, v, precision=precision) X_slice = jnp.einsum("...m,...md->...d", q, p, precision=precision) return p, X_slice init_value = jnp.zeros(z_slice_shape) p, W = lax.scan(body, init_value, (qs, ks, vs), unroll=unroll) return W, (p, qs, ks, vs) def bwd(pqkv, W_ct): def body(carry, qkv_xct): p, p_ct = carry q, k, v, x_ct = qkv_xct q_ct = jnp.einsum("...d,...md->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...d,...m->...md", x_ct, q, precision=precision) k_ct = jnp.einsum("...md,...d->...m", p_ct, v, precision=precision) v_ct = jnp.einsum("...md,...m->...d", p_ct, k, precision=precision) p -= jnp.einsum("...m,...d->...md", k, v, precision=precision) return (p, p_ct), (q_ct, k_ct, v_ct) p, qs, ks, vs = pqkv _, (qs_ct, ks_ct, vs_ct) = lax.scan( body, (p, jnp.zeros_like(p)), (qs, ks, vs, W_ct), reverse=True, unroll=unroll ) return qs_ct, ks_ct, vs_ct @jax.custom_vjp def _numerator_impl(qs, ks, vs): W, _ = fwd(qs, ks, vs) return W _numerator_impl.defvjp(fwd, bwd) return _numerator_impl def _denominator(t_slice_shape, precision, unroll=1): def fwd(qs, ks): def body(p, qk): q, k = qk p += k x = jnp.einsum("...m,...m->...", q, p, precision=precision) return p, x p = jnp.zeros(t_slice_shape) p, R = lax.scan(body, p, (qs, ks), unroll=unroll) return R, (qs, ks, p) def bwd(qkp, R_ct): def body(carry, qkx): p, p_ct = carry q, k, x_ct = qkx q_ct = jnp.einsum("...,...m->...m", x_ct, p, precision=precision) p_ct += jnp.einsum("...,...m->...m", x_ct, q, precision=precision) k_ct = p_ct p -= k return (p, p_ct), (q_ct, k_ct) qs, ks, p = qkp _, (qs_ct, ks_ct) = lax.scan(body, (p, jnp.zeros_like(p)), (qs, ks, R_ct), reverse=True, unroll=unroll) return (qs_ct, ks_ct) @jax.custom_vjp def _denominator_impl(qs, ks): R, _ = fwd(qs, ks) return R _denominator_impl.defvjp(fwd, bwd) return _denominator_impl class FastAttentionviaLowRankDecomposition(FastAttention): r""" Class providing a method for fast attention via low rank decomposition. Class is responsible for providing a method <dot_product_attention> for fast dot-product attention with the use of low rank decomposition (e.g. with random feature maps). """ def __init__( self, matrix_creator, kernel_feature_creator, renormalize_attention, numerical_stabilizer, redraw_features, unidirectional, lax_scan_unroll=1, ): # For optimal GPU performance, set to 16. 
rng = random.PRNGKey(0) self.matrix_creator = matrix_creator self.projection_matrix = self.draw_weights(rng) self.kernel_feature_creator = kernel_feature_creator self.renormalize_attention = renormalize_attention self.numerical_stabilizer = numerical_stabilizer self.redraw_features = redraw_features self.unidirectional = unidirectional self.lax_scan_unroll = lax_scan_unroll def draw_weights(self, key): if self.matrix_creator is None: return None matrixrng, _ = random.split(key) projection_matrix = self.matrix_creator(key=matrixrng).get_2d_array() return projection_matrix def dot_product_attention( self, query, key, value, dtype=jnp.float32, bias=None, axis=None, broadcast_dropout=True, dropout_rng=None, dropout_rate=0.0, deterministic=False, precision=None, ): assert key.shape[:-1] == value.shape[:-1] assert query.shape[0:1] == key.shape[0:1] and query.shape[-1] == key.shape[-1] if axis is None: axis = tuple(range(1, key.ndim - 2)) if not isinstance(axis, Iterable): axis = (axis,) assert key.ndim == query.ndim assert key.ndim == value.ndim for ax in axis: if not (query.ndim >= 3 and 1 <= ax < query.ndim - 2): raise ValueError("Attention axis must be between the batch axis and the last-two axes.") n = key.ndim # Constructing projection tensor. if self.redraw_features: # TODO(kchoro): Get rid of the constant below. query_seed = lax.convert_element_type(jnp.ceil(jnp.sum(query) * 10000000.0), jnp.int32) rng = random.PRNGKey(query_seed) self.projection_matrix = self.draw_weights(rng) # batch_dims is <bs, <non-attention dims>, num_heads> batch_dims = tuple(onp.delete(range(n), axis + (n - 1,))) # q & k -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) qk_perm = batch_dims + axis + (n - 1,) k_extra_perm = axis + batch_dims + (n - 1,) key_extra = key.transpose(k_extra_perm) key = key.transpose(qk_perm) query = query.transpose(qk_perm) # v -> (bs, <non-attention dims>, num_heads, <attention dims>, channels) v_perm = batch_dims + axis + (n - 1,) value = value.transpose(v_perm) batch_dims_t = tuple(range(len(batch_dims))) attention_dims_t = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) # Constructing tensors Q^{'} and K^{'}. query_prime = self.kernel_feature_creator( query, self.projection_matrix, attention_dims_t, batch_dims_t, precision, True ) key_prime = self.kernel_feature_creator( key, self.projection_matrix, attention_dims_t, batch_dims_t, precision, False ) if self.unidirectional: index = attention_dims_t[0] z_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) + (value.shape[-1],) numerator_fn = _numerator(z_slice_shape, precision, self.lax_scan_unroll) W = numerator_fn( jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0), jnp.moveaxis(value, index, 0) ) # Constructing W = (Q^{'}(K^{'})^{T})_{masked}V W = jnp.moveaxis(W, 0, index) if not self.renormalize_attention: # Unidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Unidirectional, normalized attention. 
thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) index = attention_dims_t[0] t_slice_shape = key_prime.shape[0 : len(batch_dims_t)] + (key_prime.shape[-1],) denominator_fn = _denominator(t_slice_shape, precision, self.lax_scan_unroll) R = denominator_fn(jnp.moveaxis(query_prime, index, 0), jnp.moveaxis(key_prime, index, 0)) R = jnp.moveaxis(R, 0, index) else: contract_query = tuple(range(len(batch_dims) + len(axis), len(batch_dims) + len(axis) + 1)) contract_z = tuple(range(len(batch_dims), len(batch_dims) + 1)) # Constructing Z = (K^{'})^{T}V # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) Z = lax.dot_general( key_prime, value, ((attention_dims_t, attention_dims_t), (batch_dims_t, batch_dims_t)), precision=precision, ) # Constructing W = Q^{'}Z = Q^{'}(K^{'})^{T}V # q (bs, <non-attention dims>, num_heads, <attention dims>, channels_m) # Z (bs, <non-attention dims>, num_heads, channels_m, channels_v) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) W = lax.dot_general( query_prime, Z, ((contract_query, contract_z), (batch_dims_t, batch_dims_t)), precision=precision ) if not self.renormalize_attention: # Bidirectional, not-normalized attention. perm_inv = _invert_perm(qk_perm) result = W.transpose(perm_inv) return result else: # Bidirectional, normalized attention. thick_all_ones = jnp.zeros(key.shape[0:-1]) + jnp.ones(key_extra.shape[0 : len(axis)]) contract_key = tuple(range(len(batch_dims), len(batch_dims) + len(axis))) contract_thick_all_ones = tuple(range(thick_all_ones.ndim - len(axis), thick_all_ones.ndim)) # Construct T = (K^{'})^{T} 1_L # k (bs, <non-attention dims>, num_heads, <attention dims>, channels) T = lax.dot_general( key_prime, thick_all_ones, ((contract_key, contract_thick_all_ones), (batch_dims_t, batch_dims_t)), precision=precision, ) # Construct partition function: R = Q^{'} T = Q^{'}(K^{'})^{T} 1_L # q_p (bs, <non-attention dims>, num_heads, <attention dims>, channs_m) # T (bs, <non-attention dims>, num_heads, channels_m) R = lax.dot_general( query_prime, T, (((query_prime.ndim - 1,), (T.ndim - 1,)), (batch_dims_t, range(0, len(T.shape) - 1))), precision=precision, ) R = R + 2 * self.numerical_stabilizer * (jnp.abs(R) <= self.numerical_stabilizer) R = jnp.reciprocal(R) R = jnp.expand_dims(R, len(R.shape)) # W (bs, <non-attention dims>, num_heads, <attention dims>, channels_v) # R (bs, <non-attention dims>, num_heads, <attention dims>, extra_channel) result = W * R # back to (bs, dim1, dim2, ..., dimN, num_heads, channels) perm_inv = _invert_perm(qk_perm) result = result.transpose(perm_inv) return result def _invert_perm(perm): perm_inv = [0] * len(perm) for i, j in enumerate(perm): perm_inv[j] = i return tuple(perm_inv)
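
# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# Google Research module). It builds the approximate softmax-kernel attention
# function and applies it to small dummy tensors. Shapes follow the
# [batch, length, num_heads, channels] convention documented in
# dot_product_attention above; all sizes are arbitrary, and redraw_features
# is disabled so the random projection matrix is drawn only once.
if __name__ == "__main__":
    example_attention_fn = make_fast_softmax_attention(
        qkv_dim=64, nb_features=256, redraw_features=False
    )
    example_rng = random.PRNGKey(0)
    q = random.normal(example_rng, (1, 16, 2, 64))  # (batch, length, heads, channels)
    out = example_attention_fn(q, q, q)
    print(out.shape)  # expected: (1, 16, 2, 64)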
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/full_script.sh
TOKENIZERS_PARALLELISM=true python run_mlm_performer.py --output_dir experiments --dataset_name wikipedia --dataset_config_name 20200501.en --model_name_or_path bert-large-cased --tokenizer_name bert-large-cased --do_train --overwrite_output_dir --per_device_train_batch_size 4 --learning_rate 5e-4 --warmup_steps 100 --num_train_epochs 3 --performer
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/run_mlm_performer.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import logging import os import sys from dataclasses import dataclass, field # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import jax import jax.numpy as jnp import numpy as np from datasets import load_dataset from flax import jax_utils from flax.optim import Adam from flax.training import common_utils from flax.training.common_utils import get_metrics from jax.nn import log_softmax from modeling_flax_performer import FlaxPerformerForMaskedLM from tqdm import tqdm from transformers import ( MODEL_FOR_MASKED_LM_MAPPING, AutoTokenizer, BertConfig, FlaxBertForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) # Cache the result has_tensorboard = is_tensorboard_available() if has_tensorboard: try: from flax.metrics.tensorboard import SummaryWriter except ImportError as ie: has_tensorboard = False print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}") else: print( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class WandbArguments: """ Arguments for logging """ wandb_user_name: Optional[str] = field( default=None, metadata={"help": "The WandB user name for potential logging. If left None, no logging"}, ) wandb_project_name: Optional[str] = field( default="performer-experiments", metadata={"help": "The WandB project name for potential logging"}, ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." 
) }, ) performer: bool = field( default=False, metadata={"help": "Whether to use FAVOR+ attention"}, ) reinitialize: bool = field( default=False, metadata={"help": "Whether to use a blank model without pretraining"}, ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." # Adapted from transformers/data/data_collator.py # Letting here for now, let's discuss where it should live @dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. 
Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. mlm (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token. mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm: bool = True mlm_probability: float = 0.15 def __post_init__(self): if self.mlm and self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]], pad_to_multiple_of: int) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, pad_to_multiple_of=pad_to_multiple_of, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) if self.mlm: batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) else: labels = batch["input_ids"].copy() if self.tokenizer.pad_token_id is not None: labels[labels == self.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[jnp.ndarray, jnp.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. 
""" labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def create_learning_rate_scheduler( factors="constant * linear_warmup * rsqrt_decay", base_learning_rate=0.5, warmup_steps=1000, decay_factor=0.5, steps_per_decay=20000, steps_per_cycle=100000, ): """Creates learning rate schedule. Interprets factors in the factors string which can consist of: * constant: interpreted as the constant value, * linear_warmup: interpreted as linear warmup until warmup_steps, * rsqrt_decay: divide by square root of max(step, warmup_steps) * rsqrt_normalized_decay: divide by square root of max(step/warmup_steps, 1) * decay_every: Every k steps decay the learning rate by decay_factor. * cosine_decay: Cyclic cosine decay, uses steps_per_cycle parameter. Args: factors: string, factors separated by "*" that defines the schedule. base_learning_rate: float, the starting constant for the lr schedule. warmup_steps: int, how many steps to warm up for in the warmup schedule. decay_factor: float, the amount to decay the learning rate by. steps_per_decay: int, how often to decay the learning rate. steps_per_cycle: int, steps per cycle when using cosine decay. Returns: a function learning_rate(step): float -> {"learning_rate": float}, the step-dependent lr. """ factors = [n.strip() for n in factors.split("*")] def step_fn(step): """Step to learning rate function.""" ret = 1.0 for name in factors: if name == "constant": ret *= base_learning_rate elif name == "linear_warmup": ret *= jnp.minimum(1.0, step / warmup_steps) elif name == "rsqrt_decay": ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == "rsqrt_normalized_decay": ret *= jnp.sqrt(warmup_steps) ret /= jnp.sqrt(jnp.maximum(step, warmup_steps)) elif name == "decay_every": ret *= decay_factor ** (step // steps_per_decay) elif name == "cosine_decay": progress = jnp.maximum(0.0, (step - warmup_steps) / float(steps_per_cycle)) ret *= jnp.maximum(0.0, 0.5 * (1.0 + jnp.cos(jnp.pi * (progress % 1.0)))) else: raise ValueError("Unknown factor %s." 
% name) return jnp.asarray(ret, dtype=jnp.float32) return step_fn def compute_metrics(logits, labels, weights, label_smoothing=0.0): """Compute summary metrics.""" loss, normalizer = cross_entropy(logits, labels, weights, label_smoothing) acc, _ = accuracy(logits, labels, weights) metrics = {"loss": loss, "accuracy": acc, "normalizer": normalizer} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics def accuracy(logits, targets, weights=None): """Compute weighted accuracy for log probs and targets. Args: logits: [batch, length, num_classes] float array. targets: categorical targets [batch, length] int array. weights: None or array of shape [batch, length] Returns: Tuple of scalar loss and batch normalizing factor. """ if logits.ndim != targets.ndim + 1: raise ValueError( "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape)) ) loss = jnp.equal(jnp.argmax(logits, axis=-1), targets) loss *= weights return loss.sum(), weights.sum() def cross_entropy(logits, targets, weights=None, label_smoothing=0.0): """Compute cross entropy and entropy for log probs and targets. Args: logits: [batch, length, num_classes] float array. targets: categorical targets [batch, length] int array. weights: None or array of shape [batch, length] label_smoothing: label smoothing constant, used to determine the on and off values. Returns: Tuple of scalar loss and batch normalizing factor. """ if logits.ndim != targets.ndim + 1: raise ValueError( "Incorrect shapes. Got shape %s logits and %s targets" % (str(logits.shape), str(targets.shape)) ) vocab_size = logits.shape[-1] confidence = 1.0 - label_smoothing low_confidence = (1.0 - confidence) / (vocab_size - 1) normalizing_constant = -( confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20) ) soft_targets = common_utils.onehot(targets, vocab_size, on_value=confidence, off_value=low_confidence) loss = -jnp.sum(soft_targets * log_softmax(logits), axis=-1) loss = loss - normalizing_constant if weights is not None: loss = loss * weights normalizing_factor = weights.sum() else: normalizing_factor = np.prod(targets.shape) return loss.sum(), normalizing_factor def training_step(optimizer, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): targets = batch.pop("labels") # Hide away tokens which doesn't participate in the optimization token_mask = jnp.where(targets > 0, 1.0, 0.0) logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss, weight_sum = cross_entropy(logits, targets, token_mask) return loss / weight_sum step = optimizer.state.step lr = lr_scheduler_fn(step) grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(optimizer.target) grad = jax.lax.pmean(grad, "batch") optimizer = optimizer.apply_gradient(grad, learning_rate=lr) return loss, optimizer, new_dropout_rng def eval_step(params, batch): """ Calculate evaluation metrics on a batch. 
""" targets = batch.pop("labels") # Hide away tokens which doesn't participate in the optimization token_mask = jnp.where(targets > 0, 1.0, 0.0) logits = model(**batch, params=params, train=False)[0] return compute_metrics(logits, targets, token_mask) def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: nb_samples = len(samples_idx) samples_to_remove = nb_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = nb_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx if __name__ == "__main__": # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments, WandbArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args, wandb_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1]) ) else: model_args, data_args, training_args, wandb_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level="NOTSET", datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) config = BertConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) lm_class = FlaxPerformerForMaskedLM if model_args.performer else FlaxBertForMaskedLM if model_args.reinitialize: model = lm_class(config=BertConfig.from_pretrained(model_args.model_name_or_path)) else: model = lm_class.from_pretrained( model_args.model_name_or_path, dtype=jnp.float32, input_shape=(training_args.train_batch_size, config.max_position_embeddings), seed=training_args.seed, dropout_rate=0.1, ) if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples = [line for line in examples if len(line) > 0 and not line.isspace()] return tokenizer( examples, return_special_tokens_mask=True, padding=padding, truncation=True, max_length=data_args.max_seq_length, ) tokenized_datasets = datasets.map( tokenize_function, input_columns=[text_column_name], batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) # Enable tensorboard only on the master node if has_tensorboard and jax.host_id() == 0: summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix()) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Setup optimizer optimizer = Adam( learning_rate=training_args.learning_rate, weight_decay=training_args.weight_decay, beta1=training_args.adam_beta1, beta2=training_args.adam_beta2, ).create(model.params) # Create learning rate scheduler lr_scheduler_fn = create_learning_rate_scheduler( base_learning_rate=training_args.learning_rate, warmup_steps=max(training_args.warmup_steps, 1) ) # Create parallel version of the training and evaluation steps p_training_step = jax.pmap(training_step, "batch", donate_argnums=(0,)) p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the optimizer on each device optimizer = jax_utils.replicate(optimizer) # Store some constant nb_epochs = int(training_args.num_train_epochs) batch_size = int(training_args.train_batch_size) eval_batch_size = int(training_args.eval_batch_size) if wandb_args.wandb_user_name is not None: import wandb wandb.init(project=wandb_args.wandb_project_name, entity=wandb_args.wandb_user_name) epochs = tqdm(range(nb_epochs), desc=f"Epoch ... (1/{nb_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ # Create sampling rng rng, training_rng, eval_rng = jax.random.split(rng, 3) # Generate an epoch by shuffling sampling indices from the train dataset nb_training_samples = len(tokenized_datasets["train"]) # Avoid using jax.numpy here in case of TPU training training_samples_idx = np.random.permutation(np.arange(nb_training_samples)) training_batch_idx = generate_batch_splits(training_samples_idx, batch_size) # Gather the indexes for creating the batch and do a training step for batch_idx in tqdm(training_batch_idx, desc="Training...", position=1): samples = [tokenized_datasets["train"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward model_inputs = common_utils.shard(model_inputs.data) loss, optimizer, dropout_rngs = p_training_step(optimizer, model_inputs, dropout_rngs) if wandb_args.wandb_user_name is not None: wandb.log({"Training loss": np.array(loss).mean()}) epochs.write(f"Loss: {loss}") # ======================== Evaluating ============================== nb_eval_samples = len(tokenized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(nb_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [tokenized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples, pad_to_multiple_of=16) # Model forward model_inputs = common_utils.shard(model_inputs.data) metrics = p_eval_step(optimizer.target, model_inputs) eval_metrics.append(metrics) eval_metrics_np = get_metrics(eval_metrics) eval_metrics_np = jax.tree_util.tree_map(jnp.sum, eval_metrics_np) eval_normalizer = eval_metrics_np.pop("normalizer") eval_summary = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics_np) # Update progress bar epochs.desc = ( f"Epoch... 
({epoch + 1}/{nb_epochs} | Loss: {eval_summary['loss']}, Acc: {eval_summary['accuracy']})" ) if wandb_args.wandb_user_name is not None: wandb.log({"Eval loss": np.array(eval_summary["loss"]).mean()}) # Save metrics if has_tensorboard and jax.host_id() == 0: for name, value in eval_summary.items(): summary_writer.scalar(name, value, epoch)
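# Worked example of the factor string accepted by `create_learning_rate_scheduler` above
# (illustrative values only; the factors compose multiplicatively). A hypothetical call such as
#
#   lr_fn = create_learning_rate_scheduler(
#       factors="constant * linear_warmup * rsqrt_decay",
#       base_learning_rate=1e-3,
#       warmup_steps=100,
#   )
#
# gives lr_fn(10)  == 1e-3 * (10 / 100) / sqrt(100) = 1e-5   (still warming up)
# and   lr_fn(400) == 1e-3 * 1.0        / sqrt(400) = 5e-5   (rsqrt decay after warmup).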
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/sanity_script.sh
TOKENIZERS_PARALLELISM=true python run_mlm_performer.py --output_dir experiments --dataset_name wikipedia --dataset_config_name 20200501.simple --model_name_or_path bert-base-cased --tokenizer_name bert-base-cased --do_train --overwrite_output_dir --per_device_train_batch_size 4 --learning_rate 5e-4 --warmup_steps 100 --num_train_epochs 3 --performer
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/performer/modeling_flax_performer.py
# coding=utf-8 # Copyright 2018 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Callable, Dict, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from jax.random import PRNGKey from modeling_flax_performer_utils import make_fast_softmax_attention from transformers.file_utils import add_start_docstrings from transformers.modeling_flax_utils import ACT2FN from transformers.models.bert.configuration_bert import BertConfig from transformers.models.bert.modeling_flax_bert import FlaxBertOnlyMLMHead, FlaxBertPreTrainedModel from transformers.utils import logging logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "BertConfig" _TOKENIZER_FOR_DOC = "BertTokenizer" BERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.BertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? 
<../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class FlaxPerformerLayerNorm(nn.Module): """ Layer normalization (https://arxiv.org/abs/1607.06450). Operates on the last axis of the input data. """ epsilon: float = 1e-6 dtype: jnp.dtype = jnp.float32 # the dtype of the computation bias: bool = True # If True, bias (beta) is added. scale: bool = True # If True, multiply by scale (gamma). When the next layer is linear # (also e.g. nn.relu), this can be disabled since the scaling will be # done by the next layer. bias_init: jnp.ndarray = nn.initializers.zeros scale_init: jnp.ndarray = nn.initializers.ones @nn.compact def __call__(self, x): """ Applies layer normalization on the input. It normalizes the activations of the layer for each given example in a batch independently, rather than across a batch like Batch Normalization. i.e. applies a transformation that maintains the mean activation within each example close to 0 and the activation standard deviation close to 1 Args: x: the inputs Returns: Normalized inputs (the same shape as inputs). 
""" features = x.shape[-1] mean = jnp.mean(x, axis=-1, keepdims=True) mean2 = jnp.mean(jax.lax.square(x), axis=-1, keepdims=True) var = mean2 - jax.lax.square(mean) mul = jax.lax.rsqrt(var + self.epsilon) if self.scale: mul = mul * jnp.asarray(self.param("gamma", self.scale_init, (features,)), self.dtype) y = (x - mean) * mul if self.bias: y = y + jnp.asarray(self.param("beta", self.bias_init, (features,)), self.dtype) return y class FlaxPerformerEmbedding(nn.Module): """ Specify a new class for doing the embedding stuff as Flax's one use 'embedding' for the parameter name and PyTorch use 'weight' """ vocab_size: int hidden_size: int emb_init: Callable[..., np.ndarray] = nn.initializers.normal(stddev=0.1) @nn.compact def __call__(self, inputs): embedding = self.param("weight", self.emb_init, (self.vocab_size, self.hidden_size)) return jnp.take(embedding, inputs, axis=0) class FlaxPerformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" vocab_size: int hidden_size: int type_vocab_size: int max_length: int @nn.compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embed w_emb = FlaxPerformerEmbedding(self.vocab_size, self.hidden_size, name="word_embeddings")( jnp.atleast_2d(input_ids.astype("i4")) ) p_emb = FlaxPerformerEmbedding(self.max_length, self.hidden_size, name="position_embeddings")( jnp.atleast_2d(position_ids.astype("i4")) ) t_emb = FlaxPerformerEmbedding(self.type_vocab_size, self.hidden_size, name="token_type_embeddings")( jnp.atleast_2d(token_type_ids.astype("i4")) ) # Sum all embeddings summed_emb = w_emb + jnp.broadcast_to(p_emb, w_emb.shape) + t_emb # Layer Norm layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(summed_emb) return layer_norm class FlaxPerformerAttention(nn.Module): num_heads: int head_size: int @nn.compact def __call__(self, hidden_state, attention_mask): single_head_dim = self.head_size // self.num_heads fast_softmax_attention = make_fast_softmax_attention(qkv_dim=single_head_dim) self_att = nn.attention.SelfAttention( num_heads=self.num_heads, qkv_features=self.head_size, name="self", attention_fn=fast_softmax_attention )(hidden_state, attention_mask) layer_norm = FlaxPerformerLayerNorm(name="layer_norm")(self_att + hidden_state) return layer_norm class FlaxPerformerIntermediate(nn.Module): output_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state): # TODO: Add ACT2FN reference to change activation function dense = nn.Dense(features=self.output_size, name="dense")(hidden_state) return ACT2FN[self.hidden_act](dense) class FlaxPerformerOutput(nn.Module): @nn.compact def __call__(self, intermediate_output, attention_output): hidden_state = nn.Dense(attention_output.shape[-1], name="dense")(intermediate_output) hidden_state = FlaxPerformerLayerNorm(name="layer_norm")(hidden_state + attention_output) return hidden_state class FlaxPerformerLayer(nn.Module): num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state, attention_mask): attention = FlaxPerformerAttention(self.num_heads, self.head_size, name="attention")( hidden_state, attention_mask ) intermediate = FlaxPerformerIntermediate( self.intermediate_size, name="intermediate", hidden_act=self.hidden_act )(attention) output = FlaxPerformerOutput(name="output")(intermediate, attention) return output class FlaxPerformerLayerCollection(nn.Module): """ Stores N BertLayer(s) """ num_layers: int num_heads: int head_size: int 
intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, inputs, attention_mask): assert self.num_layers > 0, f"num_layers should be >= 1, got ({self.num_layers})" # Initialize input / output input_i = inputs # Forward over all encoders for i in range(self.num_layers): layer = FlaxPerformerLayer( self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name=f"{i}" ) input_i = layer(input_i, attention_mask) return input_i class FlaxPerformerEncoder(nn.Module): num_layers: int num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" @nn.compact def __call__(self, hidden_state, attention_mask): layer = FlaxPerformerLayerCollection( self.num_layers, self.num_heads, self.head_size, self.intermediate_size, name="layer", hidden_act=self.hidden_act, )(hidden_state, attention_mask) return layer class FlaxPerformerPooler(nn.Module): @nn.compact def __call__(self, hidden_state): cls_token = hidden_state[:, 0] out = nn.Dense(hidden_state.shape[-1], name="dense")(cls_token) return jax.lax.tanh(out) class FlaxPerformerModule(nn.Module): vocab_size: int hidden_size: int type_vocab_size: int max_length: int num_encoder_layers: int num_heads: int head_size: int intermediate_size: int hidden_act: str = "gelu" add_pooling_layer: bool = True @nn.compact def __call__(self, input_ids, token_type_ids, position_ids, attention_mask): # Embedding embeddings = FlaxPerformerEmbeddings( self.vocab_size, self.hidden_size, self.type_vocab_size, self.max_length, name="embeddings" )(input_ids, token_type_ids, position_ids, attention_mask) # N stacked encoding layers encoder = FlaxPerformerEncoder( self.num_encoder_layers, self.num_heads, self.head_size, self.intermediate_size, hidden_act=self.hidden_act, name="encoder", )(embeddings, attention_mask) if not self.add_pooling_layer: return encoder pooled = FlaxPerformerPooler(name="pooler")(encoder) return encoder, pooled @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class FlaxPerformerModel(FlaxBertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" model_class = FlaxPerformerModule config_class = BertConfig base_model_prefix = "bert" @staticmethod def convert_from_pytorch(pt_state: Dict, config: BertConfig) -> Dict: jax_state = dict(pt_state) # Need to change some parameters name to match Flax names so that we don't have to fork any layer for key, tensor in pt_state.items(): # Key parts key_parts = set(key.split(".")) # Every dense layer has "kernel" parameters instead of "weight" if "dense.weight" in key: del jax_state[key] key = key.replace("weight", "kernel") jax_state[key] = tensor # SelfAttention needs also to replace "weight" by "kernel" if {"query", "key", "value"} & key_parts: # Flax SelfAttention decomposes the heads (num_head, size // num_heads) if "bias" in key: jax_state[key] = tensor.reshape((config.num_attention_heads, -1)) elif "weight": del jax_state[key] key = key.replace("weight", "kernel") tensor = tensor.reshape((config.num_attention_heads, -1, config.hidden_size)).transpose((2, 0, 1)) jax_state[key] = tensor # SelfAttention output is not a separate layer, remove one nesting if "attention.output.dense" in key: del jax_state[key] key = key.replace("attention.output.dense", "attention.self.out") jax_state[key] = tensor # SelfAttention output is not a separate layer, remove nesting on layer norm if "attention.output.LayerNorm" in key: del jax_state[key] key = key.replace("attention.output.LayerNorm", "attention.LayerNorm") jax_state[key] = tensor # There are some transposed parameters w.r.t their PyTorch counterpart if "intermediate.dense.kernel" in key or "output.dense.kernel" in key: jax_state[key] = tensor.T # Self Attention output projection needs to be transposed if "out.kernel" in key: jax_state[key] = tensor.reshape((config.hidden_size, config.num_attention_heads, -1)).transpose( 1, 2, 0 ) # Pooler needs to transpose its kernel if "pooler.dense.kernel" in key: jax_state[key] = tensor.T # Handle LayerNorm conversion if "LayerNorm" in key: del jax_state[key] # Replace LayerNorm by layer_norm new_key = key.replace("LayerNorm", "layer_norm") if "weight" in key: new_key = new_key.replace("weight", "gamma") elif "bias" in key: new_key = new_key.replace("bias", "beta") jax_state[new_key] = tensor return jax_state def __init__( self, config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs ): module = FlaxPerformerModule( vocab_size=config.vocab_size, hidden_size=config.hidden_size, type_vocab_size=config.type_vocab_size, max_length=config.max_position_embeddings, num_encoder_layers=config.num_hidden_layers, num_heads=config.num_attention_heads, head_size=config.hidden_size, intermediate_size=config.intermediate_size, dropout_rate=config.hidden_dropout_prob, hidden_act=config.hidden_act, ) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) @property def module(self) -> nn.Module: return self._module def __call__( self, input_ids, token_type_ids=None, position_ids=None, dropout_rng: PRNGKey = None, attention_mask=None ): input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( input_ids, attention_mask, token_type_ids, position_ids ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), rng=rngs, ) class FlaxPerformerForMaskedLM(FlaxBertPreTrainedModel): def __init__( self, 
config: BertConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs ): module = FlaxPerformerForMaskedLMModule( vocab_size=config.vocab_size, type_vocab_size=config.type_vocab_size, hidden_size=config.hidden_size, intermediate_size=config.intermediate_size, head_size=config.hidden_size, num_heads=config.num_attention_heads, num_encoder_layers=config.num_hidden_layers, max_length=config.max_position_embeddings, hidden_act=config.hidden_act, **kwargs, ) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, params: dict = None, train: bool = False, dropout_rng: PRNGKey = None, ): input_ids, attention_mask, token_type_ids, position_ids = self._check_inputs( input_ids, attention_mask, token_type_ids, position_ids ) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), jnp.array(position_ids, dtype="i4"), not train, rngs=rngs, ) class FlaxPerformerForMaskedLMModule(nn.Module): vocab_size: int hidden_size: int intermediate_size: int head_size: int num_heads: int num_encoder_layers: int type_vocab_size: int max_length: int hidden_act: str dropout_rate: float = 0.0 dtype: jnp.dtype = jnp.float32 @nn.compact def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, deterministic: bool = True ): # Model encoder = FlaxPerformerModule( vocab_size=self.vocab_size, hidden_size=self.hidden_size, type_vocab_size=self.type_vocab_size, max_length=self.max_length, num_encoder_layers=self.num_encoder_layers, num_heads=self.num_heads, head_size=self.hidden_size, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, add_pooling_layer=False, name="bert", )(input_ids, attention_mask, token_type_ids, position_ids) # Compute the prediction scores encoder = nn.Dropout(rate=self.dropout_rate)(encoder, deterministic=deterministic) logits = FlaxBertOnlyMLMHead( vocab_size=self.vocab_size, hidden_act=self.hidden_act, name="cls", dtype=self.dtype )(encoder) return (logits,)
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/information-gain-filtration/requirements.txt
matplotlib numpy>=1.17.2 joblib>=0.13.2 scipy torch>=1.10.1 transformers>=3.5
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/information-gain-filtration/README.md
# Information Gain Filtration (IGF)

Authors @Tuko @mraunak

This folder contains the code showing how to implement IGF for fine-tuning GPT-2.

## What is IGF?

Here we present a general fine-tuning method that we call Information Gain Filtration, for improving the overall training efficiency and final performance of language model fine-tuning (see the paper below). It is an alternative fine-tuning approach that trains a secondary model (e.g., a simple convolutional network) to predict the amount of information gained over a given pre-trained model. The secondary model is lightweight and trained to predict the Information Gain measure. Information Gain is defined as the change in a loss function for a model before and after an SGD update with a sample (Equation X in the paper). A small subset of the training set, named the “objective” set, is used to measure information gain on the pre-trained model, and consequently to train the secondary model. After training, the secondary model is used to filter samples during the fine-tuning process. A high information gain value suggests a sample is informative, whereas a low value suggests a non-informative sample that should be filtered out. Thus, a thresholding strategy is defined to select informative samples. With such a strategy, samples are filtered, and once enough samples are selected to form a mini-batch, a usual fine-tuning/optimization step is applied. The filtration process is repeated until the fine-tuning process is over. A minimal sketch of this filtering loop is included at the end of this README.

Paper: [Selecting Informative Contexts Improves Language Model Finetuning](https://arxiv.org/abs/2005.00175)

# Results

Several experiments were conducted to show the robustness of the IGF method versus the standard fine-tuning process. For example, we achieve a median perplexity of 54.0 on the Books dataset, compared to 57.3 for standard fine-tuning on GPT-2 Small. The code was implemented using the Transformers library and PyTorch. While the method may seem more expensive, we saw enough evidence that it may lead to a performance benefit in the final models.

![IGF performance](result_igf.png)

Figure 1: Comparing IGF to standard fine-tuning: IGF with constant (p < 10^-3, t-test) and shifting (p < 10^-6, t-test) thresholding significantly outperform standard fine-tuning. The left-hand figure shows test-set perplexity after each fine-tuning batch, averaged over 50 runs (error bars denote ± one standard error). The right-hand figure shows the perplexity of each method after 60 batches. IGF with shifting thresholding (red) clearly improves over standard batched fine-tuning with Adam.

## How to use this project?

To fine-tune a transformer model with IGF on a language modeling task, use the following script:

- `model_name_or_path`: Path to pretrained model or model identifier from huggingface.co/models
- `data_file`: A jbl file containing tokenized data which can be split as objective dataset, train_dataset and test_dataset
- `igf_data_file`: A jbl file containing the context and information gain pairs to train the secondary learner.
- `context_len`: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
- `size_objective_set`: Number of articles that are long enough to be used as our objective set
- `min_len`: The minimum length of an article to be used in the objective set
- `trim`: Truncate the example if it exceeds context length
- `eval_freq`: Secondary model evaluation is triggered every eval_freq steps
- `max_steps`: Used to calculate the number of training epochs
- `number`: The number of examples split to be used as objective_set/test_data
- `secondary_learner_batch_size`: The batch size of training data for the secondary learner
- `secondary_learner_max_epochs`: The number of epochs to train the secondary learner
- `recopy_model`: Reset the model to the original pretrained GPT-2 weights after each iteration
- `eval_interval`: Decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average after eval_interval (10) batches

```bash
python run_clm_igf.py\
 --model_name_or_path "gpt2" \
 --data_file="data/tokenized_stories_train_wikitext103" \
 --igf_data_file="data/IGF_values" \
 --context_len 32 \
 --size_objective_set 100 \
 --min_len 1026 \
 --trim True \
 --eval_freq 100 \
 --max_steps 1000 \
 --secondary_learner_batch_size 128 \
 --secondary_learner_max_epochs 15 \
 --number 100 \
 --recopy_model \
 --eval_interval 10
```

## Citation

If you find this resource useful, please cite the following paper:

```
@inproceedings{antonello-etal-2021-selecting,
    title = "Selecting Informative Contexts Improves Language Model Fine-tuning",
    author = "Antonello, Richard and Beckage, Nicole and Turek, Javier and Huth, Alexander",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.87",
    doi = "10.18653/v1/2021.acl-long.87",
    pages = "1072--1085",
}
```
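## Filtering loop sketch

The snippet below is a minimal, illustrative sketch of the filtering idea described above, not the project's actual training loop (see `run_clm_igf.py` and `igf/igf.py` for the real implementation). The helper name `igf_filter_step` and the mini-batch size of 16 are assumptions made for illustration only.

```python
import torch


def igf_filter_step(model, secondary_learner, context, batch, threshold, optimizer):
    """Keep `context` only if its predicted information gain clears `threshold`;
    run one optimizer step once enough contexts have been accumulated."""
    with torch.no_grad():
        predicted_ig = secondary_learner(context).item()  # scalar IG estimate for this context
    if predicted_ig >= threshold:
        batch.append(context)
    if len(batch) == 16:  # assumed mini-batch size
        tokens = torch.cat(batch, dim=0)
        loss = model(tokens, labels=tokens)[0]  # causal LM loss on the filtered mini-batch
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        batch.clear()
    return batch
```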
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/information-gain-filtration/run_clm_igf.py
# Copyright 2022 - Intel Corp. All rights reserved.
# Authors: Mayank Kumar Raunak, Javier Turek, Nicole Beckage

"""
Implementation of a new method for fine-tuning transformer models that we call
Information Gain Filtration (IGF), evaluated on the WikiText data set and compared
with the standard fine-tuning method.

Steps followed in the code:

1) Generate an objective dataset of pairs (X, IG(X)). IG(X) -- informativeness of context 'X'.
Our IG (information gain) model is learning to predict the 'informativeness' of a particular
context. Informativeness is the change in metric between the model's accuracy on an
objective set before and after seeing that context. For causal language modeling, the
metric is perplexity.

2) A secondary learner is trained to infer a function approximation for IG using the dataset
created in (1).

3) The learner created in (2) is used to inform the fine-tuning process and filter out
less informative samples.

Lastly, a plot is generated to compare the performance of IGF to standard fine-tuning
without any filtering.

"""

# Prerequisite libraries:

import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    """
    Collecting *n* pairs for training the secondary learner

    Args:
        context_len: The maximum total input sequence length after tokenization. Sequences longer
                     than this will be truncated, sequences shorter will be padded
        max_steps: To calculate training epochs of secondary learner
        size_objective_set: size of objective data set used to create (X, IG(X)) pairs which is the
                            training data for the secondary learner
        min_len: The minimum length of the article to be used as objective set
        trim: If True truncate the context if it exceeds context length
        data_file: Tokenized data set split for training and evaluation of model
        igf_data_file: file to store the (X, IG(X)) paired data set used to train the secondary learner

    Returns:
        Data stored in igf_data_file
    """
    # generates same data every time
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=True
    )

    # keeps model same across runs
    set_seed(4)

    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights

    # can we train on GPU?
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # load pretrained model model = load_gpt2("gpt2").to(device) print("computing perplexity on objective set") orig_perp = compute_perplexity(model, objective_set, context_len).item() print("perplexity on objective set:", orig_perp) # collect igf pairs and save to file demo.jbl collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ): """ Train the secondary learner Args: secondary_learner_train_data: Data set with (X,IG(X)) pairs to train secondary learner where IG(X) - measure of informativeness and X- context secondary_learner_max_epochs: Number of epochs to train secondary learner secondary_learner_batch_size: Batch size to train secondary learner eval_freq (object): secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner """ set_seed(42) # Load pre-trained model model = GPT2LMHeadModel.from_pretrained("gpt2") # Initialize secondary learner to use embedding weights of model secondary_learner = SecondaryLearner(model) # Train secondary learner secondary_learner = train_secondary_learner( secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ): """ fine-tune with IGF if secondary_learner is not None, else standard fine-tuning Args: model: pre-trained GPT-2 model train_dataset: Data set to train GPT-2 model test_dataset: Evaluate GPT-2 model context_len: The maximum total input sequence length after tokenization. 
                     Sequences longer than this will be truncated, sequences shorter will be padded
        max_steps: To calculate training epochs
        batch_size: Batch size to train GPT-2 model
        threshold: The threshold value used by the secondary learner to filter the train_data and allow only
                   informative data as input to the model
        recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration
        secondary_learner: Selection of IGF as fine-tuning method if not None
        eval_interval: number of batches after which to decay the selectivity of our secondary learner filter from
                       1 standard deviation above average to 1 below average
        finetuned_model_name: name of the final fine-tuned GPT-2 model

    Returns:
        Fine-tuned GPT-2 model
    """

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size: torch.cuda.empty_cache() examples = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: real_perp = compute_perplexity(model, test_dataset, context_len) test_perps.append(real_perp) print("Test perplexity, step", global_step, ":", real_perp) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict(), finetuned_model_name) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def main(): parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task") # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--data_file", type=str, default=None, help=( "A jbl file containing tokenized data which can be split as objective dataset, " "train_dataset and test_dataset." ), ) parser.add_argument( "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.", ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--context_len", default=32, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." 
), ) parser.add_argument( "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", ) parser.add_argument( "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq" ) parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs") parser.add_argument( "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", ) parser.add_argument( "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) " ) parser.add_argument( "--eval_interval", default=10, type=int, help=( "decay the selectivity of our secondary learner filter from" "1 standard deviation above average to 1 below average after 10 batches" ), ) parser.add_argument( "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data" ) parser.add_argument( "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set" ) parser.add_argument( "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner" ) parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length") parser.add_argument( "--threshold", default=1.0, type=float, help=( "The threshold value used by secondary learner to filter the train_data and allow only" " informative data as input to the model" ), ) parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name") parser.add_argument( "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ) # Load train data for secondary learner secondary_learner_train_data = joblib.load("data/IGF_values.jbl") # Train secondary learner secondary_learner = training_secondary_learner( secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ) # load pretrained gpt2 model model = GPT2LMHeadModel.from_pretrained("gpt2") set_seed(42) # Generate train and test data to train and evaluate gpt2 model train_dataset, test_dataset = generate_datasets( context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ) if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects/information-gain-filtration
hf_public_repos/transformers/examples/research_projects/information-gain-filtration/igf/igf.py
# Copyright 2022 - Intel Corp. All rights reserved. # Authors: Mayank Kumar Raunak, Javier Turek, Nicole Backage import copy import logging import random import joblib import numpy as np import torch import torch.nn as nn from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AdamW, GPT2LMHeadModel, get_linear_schedule_with_warmup logger = logging.getLogger(__name__) def set_seed(seed): """ For reproducible training Args: seed: A seed for reproducible training """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) def compute_perplexity(model, test_data, context_len): """ Computes perplexity of the transformer model on data in test_data Args: model: Pre-trained GPT2 model test_data: Data on which perplexity calculation is required context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded Returns: Perplexity on input test data """ model.eval() device = next(model.parameters()).device eval_batch_size = 1 context = torch.zeros((eval_batch_size, context_len), dtype=torch.long, device=device) eval_dataloader = DataLoader(test_data, shuffle=False, batch_size=eval_batch_size) eval_loss = torch.zeros(1, device=device) nb_eval_examples = 0 for batch in eval_dataloader: batch.to(device) # pad context.zero_() for i in range(eval_batch_size): context[i, :] = batch[i] outputs = model(context, labels=context) eval_loss += outputs[0].sum().item() nb_eval_examples += batch.size(0) eval_loss = eval_loss / nb_eval_examples perplexity = torch.exp(eval_loss) model.train() return perplexity def load_gpt2(model_name="gpt2"): """ load original gpt2 and save off for quicker loading Args: model_name: GPT-2 Returns: GPT-2 model """ model = GPT2LMHeadModel.from_pretrained(model_name, output_hidden_states=True) torch.save(model.state_dict(), model_name + "local.pt") return model def recopy_gpt2(orig_model, device, max_steps): """ Reset the model to the original pretrained GPT-2 weights after each iteration Args: orig_model: Original pretrained GPT-2 model imported from Transformers library device: CPU/GPU max_steps: number of training steps Returns: Original PreTrained GPT-2 model, lm_optimizer: Adam optimizer with Decoupled weight decay lm_scheduler: linear scheduler with the appropriate schedule """ model = copy.deepcopy(orig_model) model.to(device) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] lm_optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8) lm_scheduler = get_linear_schedule_with_warmup(lm_optimizer, 0, max_steps) torch.cuda.empty_cache() return model, lm_optimizer, lm_scheduler def intermittent_save(contexts, real_perps, past_perps, filename): """ save the perplexity differences to filename Args: contexts: Example on which the perplexity is calculated real_perps: Perplexity after back-propagating on the selected context past_perps: Perplexity of model before training on the context filename: File to store perplexity differences Returns: file with perplexity differences """ # save the perplexity differences to filename avg = np.array(real_perps).mean() std = np.array(real_perps).std() perp_diff = (real_perps - avg) / std data_final = list(zip(contexts, perp_diff, 
past_perps)) joblib.dump(data_final, filename) def collect_objective_set( model, orig_perp, context_len, train_data, objective_set, max_steps, device, filename="dev.jbl", recopy_model=recopy_gpt2, ): """ Collect individual IGF values from pre-trained transformer model max_steps samples of training data to train secondary model Args: model: Pre-trained GPT2 model orig_perp: Perplexity of original pretrained GPT-2 model context_len: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded train_data: Data to train model objective_set: Contexts used to create (X,IG(X)) pairs which is the training data for secondary learner max_steps: To calculate training epochs of model device: GPU/CPU filename: To store intermediate perplexity differences recopy_model: Reset the model to the original pretrained GPT-2 weights after each iteration Returns: file stored intermediate perplexity differences in intermediate stages """ # initialize variables to record relevant information contexts = [] real_perps = [] past_perps = [] # Initialize the transformer model orig_model = copy.deepcopy(model) orig_model.to(device="cpu") torch.cuda.empty_cache() # Compute perplexity of initial transformer model for comparison model.train() model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) for step in tqdm(range(max_steps)): context = torch.zeros((1, context_len), dtype=torch.long, device=device) story = random.choice(train_data) start = random.randint(0, len(story[0]) - context_len - 1) context[0, :] = story[0][start : start + context_len] lm_optimizer.zero_grad() outputs = model(context, labels=context) lm_loss = outputs[0] past_perp = compute_perplexity(model, context, context_len) model.train() lm_loss.backward() # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule # Compute perplexity after back-propagating on the selected context real_perp = compute_perplexity(model, objective_set, context_len) # Periodically save the stored (X, IG(X)) pairs if step % 1000 == 0 and step > 1: intermittent_save(contexts, real_perps, past_perps, filename) # Reset the pretrained model to the original pretrained GPT-2 weights after each iteration model, lm_optimizer, lm_scheduler = recopy_model(orig_model, device, max_steps) past_perps.append(past_perp.item()) real_perps.append(orig_perp - real_perp.item()) contexts.append(np.array(context.cpu())) intermittent_save(contexts, real_perps, past_perps, filename) def generate_datasets( context_len, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True ): """ Generate objective set and training set Args: context_len: The maximum total input sequence length after tokenization. 
Sequences longer than this will be truncated, sequences shorter will be padded file: Tokenized data split into training set and objective set number: size of objective dataset min_len: minimum length of a context in objective set trim: If True truncate the context if it exceeds context length Returns: Generated objective set and training data """ # Generate objective set and training set # Designate the first number (100) articles that are long enough to be used # as our objective set, rest (that are long enough) are training data for # secondary learner data = joblib.load(file) print("data loaded") objective_set = [] if trim: for i, example in enumerate(data): if len(example[0]) > min_len: start = random.randint(0, len(example[0]) - context_len - 1) objective_set.append(example[0, start : start + context_len]) if len(objective_set) >= number: break train_data = [] for j in range(i + 1, len(data)): if len(data[j][0]) > min_len: train_data.append(data[j]) else: objective_set = data[0:number] train_data = data[number:] joblib.dump(objective_set, "objective_set.jbl") print("objective set saved") return train_data, objective_set def train_secondary_learner( secondary_learner, train_dataset, max_epochs, batch_size, eval_freq=50, igf_model_path="secondary_learner.pt" ): """ Train the secondary learner (igf_model) Args: secondary_learner: secondary learner train_dataset: data to train secondary learner max_epochs: number of epochs to train secondary learner batch_size: batch size of training data of secondary learner eval_freq: secondary model evaluation can be triggered at eval_freq igf_model_path: path to store trained secondary learner Returns: Trained secondary learner """ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # We will use the first 512 pairs from our dataset as a test set for # our secondary learner and the rest to train test_dataset = train_dataset[:512] train_dataset = train_dataset[512:] train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size) test_dataloader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size) # secondary learner model set up loss = nn.MSELoss() test_loss = nn.MSELoss(reduction="sum") secondary_learner.to(device) q_optimizer = torch.optim.Adam(secondary_learner.parameters(), lr=0.00001) secondary_learner.train() # TODO in original code this is written as number of actual batches seen # not number of items seen but other places it is number of items instead. # improve consistency! 
changed this to epochs for clarity best_test_loss = float("inf") # Iterate through batches until we've used max_steps batches for epoch in range(int(max_epochs)): tr_q_loss = 0.0 secondary_learner.train() for step, batch in enumerate(train_dataloader): context = batch[0].to(device) real_q = batch[1].to(device) predicted_q = secondary_learner(context) q_optimizer.zero_grad() q_loss = loss(predicted_q, real_q.float()) q_loss.backward() q_optimizer.step() tr_q_loss += q_loss.item() # model trains fairly quickly so we won't wait for a full epoch # eval is triggered at eval_freq and end of epochs if (step % eval_freq == 0 and step > 0) or ((step + 1) == len(train_dataloader)): tr_loss = tr_q_loss / (step + 1) secondary_learner.eval() q_loss2 = 0.0 sum_q2 = 0.0 predicted = [] actual = [] # Compute performance of the secondary learner after this batch for step2, batch2 in enumerate(test_dataloader): features2 = batch2[0].to(device) real_q2 = batch2[1].to(device) predicted_q2 = secondary_learner(features2) q_loss2 += test_loss(predicted_q2, real_q2).item() sum_q2 += torch.sum(predicted_q2).item() for ei, i in enumerate(predicted_q2.cpu().detach().numpy()): predicted.append(i.item()) for ei, i in enumerate(real_q2.cpu().detach().numpy()): actual.append(i.item()) q_loss2 /= len(test_dataset) print( "Epoch: ", epoch, "step: ", step, "Avg. q:", sum_q2 / len(test_dataset), "Train Loss: ", tr_loss, "Test Loss: ", q_loss2, ) if q_loss2 < best_test_loss: joblib.dump((predicted, actual), "pred_vs_actual.jbl") torch.save(secondary_learner.state_dict(), igf_model_path) best_test_loss = q_loss2 secondary_learner.train() return secondary_learner class SecondaryLearner(nn.Module): """ Our secondary learner """ def __init__(self, model): """ We use a simple convolutional network as our secondary learner Args: model: Pre-trained GPT2 model """ # embeddings are from the pretrained model super(SecondaryLearner, self).__init__() self.embeddings = model.transformer.wte self.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) self.conv = nn.Conv1d(self.embeddings.weight.size(1), 256, 3, padding=1) self.fc = nn.Sequential(nn.Linear(256, 32), nn.Dropout(p=0.1), nn.Linear(32, 32), nn.Linear(32, 1)) def forward(self, context): """ Forward pass through the secondary learner Args: context: Context input to the secondary learner Returns: tensor after squeeze operation """ pooled = torch.max(self.conv(self.embeddings(context).squeeze(1).transpose(1, 2)), 2)[0] qs = self.fc(pooled) return qs.squeeze(1) @classmethod def from_pretrained(cls, state_path, model): """ Load the secondary learner Args: state_path: Path to save secondary learner model: Pretrained GPT-2 Returns: secondary learner """ secondary_learner = cls(model) # this calls __init__ state_dict = torch.load(state_path) secondary_learner.load_state_dict(state_dict) secondary_learner.embeddings = model.transformer.wte secondary_learner.embeddings.weight = copy.deepcopy(model.transformer.wte.weight) return secondary_learner
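Not part of the original file, but as a rough usage sketch: once train_secondary_learner has written "secondary_learner.pt", the secondary learner could be reloaded and queried for an IGF estimate on a single tokenized context along these lines (the "gpt2" checkpoint name and the example sentence are illustrative assumptions):

```python
# Illustrative sketch only: load a trained secondary learner and score one context with it.
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

gpt2 = GPT2LMHeadModel.from_pretrained("gpt2")
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

# Assumes "secondary_learner.pt" was produced by train_secondary_learner() above.
secondary_learner = SecondaryLearner.from_pretrained("secondary_learner.pt", gpt2)
secondary_learner.eval()

context = tokenizer("The quick brown fox jumps over the lazy dog", return_tensors="pt").input_ids
with torch.no_grad():
    igf_estimate = secondary_learner(context)  # higher values suggest a more informative context
print(igf_estimate.item())
```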
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/requirements.txt
transformers
datasets
torch>=1.5.0
torchaudio
jiwer==2.2.0
lang-trans==0.6.0
librosa==0.8.0
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/run_common_voice.py
#!/usr/bin/env python3 import json import logging import os import re import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import datasets import numpy as np import torch import torchaudio from packaging import version from torch import nn import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC, Wav2Vec2Processor, is_apex_available, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) attention_dropout: Optional[float] = field( default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} ) activation_dropout: Optional[float] = field( default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} ) hidden_dropout: Optional[float] = field( default=0.1, metadata={ "help": "The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler." }, ) feat_proj_dropout: Optional[float] = field( default=0.1, metadata={"help": "The dropout probabilitiy for all 1D convolutional layers in feature extractor."}, ) mask_time_prob: Optional[float] = field( default=0.05, metadata={ "help": ( "Propability of each feature vector along the time axis to be chosen as the start of the vector" "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature" "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``." ) }, ) layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."}) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train+validation", metadata={ "help": "The name of the training data set split to use (via the datasets library). 
Defaults to 'train'" }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." ) }, ) chars_to_ignore: List[str] = list_field( default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."}, ) @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lenghts and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) labels_batch = self.processor.pad( labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: train_dataset = datasets.load_dataset( "common_voice", data_args.dataset_config_name, split=data_args.train_split_name ) eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test") # Create and save tokenizer chars_to_ignore_regex = f'[{"".join(data_args.chars_to_ignore)}]' def remove_special_characters(batch): batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " " return batch train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"]) eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"]) def extract_all_chars(batch): all_text = " ".join(batch["text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]} vocab_train = train_dataset.map( extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, ) vocab_test = train_dataset.map( extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, ) vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0])) vocab_dict = {v: k for k, v in enumerate(vocab_list)} vocab_dict["|"] = vocab_dict[" "] del vocab_dict[" "] vocab_dict["[UNK]"] = len(vocab_dict) vocab_dict["[PAD]"] = len(vocab_dict) with open("vocab.json", "w") as vocab_file: json.dump(vocab_dict, vocab_file) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
tokenizer = Wav2Vec2CTCTokenizer( "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", ) feature_extractor = Wav2Vec2FeatureExtractor( feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True ) processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) model = Wav2Vec2ForCTC.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), ) if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays and tokenize the targets. def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() batch["sampling_rate"] = 16_000 batch["target_text"] = batch["text"] return batch train_dataset = train_dataset.map( speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, ) eval_dataset = eval_dataset.map( speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, ) def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." 
processed_batch = processor( audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0] ) batch.update(processed_batch) return batch train_dataset = train_dataset.map( prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) eval_dataset = eval_dataset.map( prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) # Metric wer_metric = datasets.load_metric("wer") def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() # Data collator data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) # Initialize our Trainer trainer = CTCTrainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None # Save the feature_extractor and the tokenizer if is_main_process(training_args.local_rank): processor.save_pretrained(training_args.output_dir) train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) return results if __name__ == "__main__": main()
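As a hedged illustration (not part of the script) of what DataCollatorCTCWithPadding produces, a toy batch of two unequal-length examples could be padded like this; the "facebook/wav2vec2-base-960h" checkpoint is only an example, and a transformers version recent enough to support the `labels=` padding call used by the collator is assumed:

```python
# Illustrative sketch: dynamic padding of a toy CTC batch with the collator defined above.
from transformers import Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")  # example checkpoint
collator = DataCollatorCTCWithPadding(processor=processor, padding=True)

features = [
    {"input_values": [0.1] * 16000, "labels": processor.tokenizer("HELLO").input_ids},
    {"input_values": [0.2] * 8000, "labels": processor.tokenizer("WORLD").input_ids},
]
batch = collator(features)
# input_values are padded to the longest example; padded label positions become -100
print(batch["input_values"].shape, batch["labels"].shape)
```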
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_base_timit_asr.sh
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-base-timit-asr" \ --num_train_epochs="30" \ --per_device_train_batch_size="20" \ --per_device_eval_batch_size="20" \ --evaluation_strategy="steps" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-base" \ --fp16 \ --dataset_name="timit_asr" \ --train_split_name="train" \ --validation_split_name="test" \ --orthography="timit" \ --preprocessing_num_workers="$(nproc)" \ --group_by_length \ --freeze_feature_extractor \ --verbose_logging \
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/README.md
**NOTE**: This example is outdated and is not longer actively maintained. Please follow the new instructions of fine-tuning Wav2Vec2 [here](https://github.com/huggingface/transformers/blob/main/examples/pytorch/speech-recognition/README.md) ## Fine-tuning Wav2Vec2 The `run_asr.py` script allows one to fine-tune pretrained Wav2Vec2 models that can be found [here](https://huggingface.co/models?search=facebook/wav2vec2). This finetuning script can also be run as a google colab [TODO: here]( ). ### Fine-Tuning with TIMIT Let's take a look at the [script](./finetune_base_timit_asr.sh) used to fine-tune [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) with the [TIMIT dataset](https://huggingface.co/datasets/timit_asr): ```bash #!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-base-timit-asr" \ --num_train_epochs="30" \ --per_device_train_batch_size="20" \ --per_device_eval_batch_size="20" \ --evaluation_strategy="steps" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-base" \ --fp16 \ --dataset_name="timit_asr" \ --train_split_name="train" \ --validation_split_name="test" \ --orthography="timit" \ --preprocessing_num_workers="$(nproc)" \ --group_by_length \ --freeze_feature_extractor \ --verbose_logging \ ``` The resulting model and inference examples can be found [here](https://huggingface.co/elgeish/wav2vec2-base-timit-asr). Some of the arguments above may look unfamiliar, let's break down what's going on: `--orthography="timit"` applies certain text preprocessing rules, for tokenization and normalization, to clean up the dataset. In this case, we use the following instance of `Orthography`: ```python Orthography( do_lower_case=True, # break compounds like "quarter-century-old" and replace pauses "--" translation_table=str.maketrans({"-": " "}), ) ``` The instance above is used as follows: * creates a tokenizer with `do_lower_case=True` (ignores casing for input and lowercases output when decoding) * replaces `"-"` with `" "` to break compounds like `"quarter-century-old"` and to clean up suspended hyphens * cleans up consecutive whitespaces (replaces them with a single space: `" "`) * removes characters not in vocabulary (lacking respective sound units) `--verbose_logging` logs text preprocessing updates and when evaluating, using the validation split every `eval_steps`, logs references and predictions. ### Fine-Tuning with Arabic Speech Corpus Other datasets, like the [Arabic Speech Corpus dataset](https://huggingface.co/datasets/arabic_speech_corpus), require more work! 
Let's take a look at the [script](./finetune_large_xlsr_53_arabic_speech_corpus.sh) used to fine-tune [wav2vec2-large-xlsr-53](https://huggingface.co/elgeish/wav2vec2-large-xlsr-53-arabic): ```bash #!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-large-xlsr-53-arabic-speech-corpus" \ --num_train_epochs="50" \ --per_device_train_batch_size="1" \ --per_device_eval_batch_size="1" \ --gradient_accumulation_steps="8" \ --evaluation_strategy="steps" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="elgeish/wav2vec2-large-xlsr-53-arabic" \ --fp16 \ --dataset_name="arabic_speech_corpus" \ --train_split_name="train" \ --validation_split_name="test" \ --max_duration_in_seconds="15" \ --orthography="buckwalter" \ --preprocessing_num_workers="$(nproc)" \ --group_by_length \ --freeze_feature_extractor \ --target_feature_extractor_sampling_rate \ --verbose_logging \ ``` First, let's understand how this dataset represents Arabic text; it uses a format called [Buckwalter transliteration](https://en.wikipedia.org/wiki/Buckwalter_transliteration). We use the [lang-trans](https://github.com/kariminf/lang-trans) package to convert back to Arabic when logging. The Buckwalter format only includes ASCII characters, some of which are non-alpha (e.g., `">"` maps to `"أ"`). `--orthography="buckwalter"` applies certain text preprocessing rules, for tokenization and normalization, to clean up the dataset. In this case, we use the following instance of `Orthography`: ```python Orthography( vocab_file=pathlib.Path(__file__).parent.joinpath("vocab/buckwalter.json"), word_delimiter_token="/", # "|" is Arabic letter alef with madda above words_to_remove={"sil"}, # fixing "sil" in arabic_speech_corpus dataset untransliterator=arabic.buckwalter.untransliterate, translation_table=str.maketrans(translation_table = { "-": " ", # sometimes used to represent pauses "^": "v", # fixing "tha" in arabic_speech_corpus dataset }), ) ``` The instance above is used as follows: * creates a tokenizer with Buckwalter vocabulary and `word_delimiter_token="/"` * replaces `"-"` with `" "` to clean up hyphens and fixes the orthography for `"ث"` * removes words used as indicators (in this case, `"sil"` is used for silence) * cleans up consecutive whitespaces (replaces them with a single space: `" "`) * removes characters not in vocabulary (lacking respective sound units) `--verbose_logging` logs text preprocessing updates and when evaluating, using the validation split every `eval_steps`, logs references and predictions. Using the Buckwalter format, text is also logged in Arabic abjad. `--target_feature_extractor_sampling_rate` resamples audio to target feature extractor's sampling rate (16kHz). `--max_duration_in_seconds="15"` filters out examples whose audio is longer than the specified limit, which helps with capping GPU memory usage. ### DeepSpeed Integration To learn how to deploy Deepspeed Integration please refer to [this guide](https://huggingface.co/transformers/main/main_classes/deepspeed.html#deepspeed-trainer-integration). 
But to get started quickly all you need is to install: ``` pip install deepspeed ``` and then use the default configuration files in this directory: * `ds_config_wav2vec2_zero2.json` * `ds_config_wav2vec2_zero3.json` Here are examples of how you can use DeepSpeed: (edit the value for `--num_gpus` to match the number of GPUs you have) ZeRO-2: ``` PYTHONPATH=../../../src deepspeed --num_gpus 2 \ run_asr.py \ --output_dir=output_dir --num_train_epochs=2 --per_device_train_batch_size=2 \ --per_device_eval_batch_size=2 --evaluation_strategy=steps --save_steps=500 --eval_steps=100 \ --logging_steps=5 --learning_rate=5e-4 --warmup_steps=3000 \ --model_name_or_path=patrickvonplaten/wav2vec2_tiny_random_robust \ --dataset_name=hf-internal-testing/librispeech_asr_dummy --dataset_config_name=clean \ --train_split_name=validation --validation_split_name=validation --orthography=timit \ --preprocessing_num_workers=1 --group_by_length --freeze_feature_extractor --verbose_logging \ --deepspeed ds_config_wav2vec2_zero2.json ``` For ZeRO-2 with more than 1 gpu you need to use (which is already in the example configuration file): ``` "zero_optimization": { ... "find_unused_parameters": true, ... } ``` ZeRO-3: ``` PYTHONPATH=../../../src deepspeed --num_gpus 2 \ run_asr.py \ --output_dir=output_dir --num_train_epochs=2 --per_device_train_batch_size=2 \ --per_device_eval_batch_size=2 --evaluation_strategy=steps --save_steps=500 --eval_steps=100 \ --logging_steps=5 --learning_rate=5e-4 --warmup_steps=3000 \ --model_name_or_path=patrickvonplaten/wav2vec2_tiny_random_robust \ --dataset_name=hf-internal-testing/librispeech_asr_dummy --dataset_config_name=clean \ --train_split_name=validation --validation_split_name=validation --orthography=timit \ --preprocessing_num_workers=1 --group_by_length --freeze_feature_extractor --verbose_logging \ --deepspeed ds_config_wav2vec2_zero3.json ``` ### Pretraining Wav2Vec2 The `run_pretrain.py` script allows one to pretrain a Wav2Vec2 model from scratch using Wav2Vec2's contrastive loss objective (see official [paper](https://arxiv.org/abs/2006.11477) for more information). It is recommended to pre-train Wav2Vec2 with Trainer + Deepspeed (please refer to [this guide](https://huggingface.co/transformers/main/main_classes/deepspeed.html#deepspeed-trainer-integration) for more information). Here is an example of how you can use DeepSpeed ZeRO-2 to pretrain a small Wav2Vec2 model: ``` PYTHONPATH=../../../src deepspeed --num_gpus 4 run_pretrain.py \ --output_dir="./wav2vec2-base-libri-100h" \ --num_train_epochs="3" \ --per_device_train_batch_size="32" \ --per_device_eval_batch_size="32" \ --gradient_accumulation_steps="2" \ --save_total_limit="3" \ --save_steps="500" \ --logging_steps="10" \ --learning_rate="5e-4" \ --weight_decay="0.01" \ --warmup_steps="3000" \ --model_name_or_path="patrickvonplaten/wav2vec2-base-libri-100h" \ --dataset_name="librispeech_asr" \ --dataset_config_name="clean" \ --train_split_name="train.100" \ --preprocessing_num_workers="4" \ --max_duration_in_seconds="10.0" \ --group_by_length \ --verbose_logging \ --fp16 \ --deepspeed ds_config_wav2vec2_zero2.json \ ``` ### Forced Alignment Character level forced alignment for audio and text pairs with wav2vec2 models finetuned on ASR task for a specific language. Inspired by [this](https://pytorch.org/tutorials/intermediate/forced_alignment_with_torchaudio_tutorial.html) Pytorch tutorial. 
#### Input Formats

Input format in script.txt          Input format in wavs directory

    0000    sentence1               0000.wav
    0001    sentence2               0001.wav

#### Output Format

The output directory will contain 0000.txt and 0001.txt. Each file uses the format shown below:

    char    score    start_ms    end_ms
    h       0.25     1440        1520

#### Run command

```
python alignment.py \
 --model_name="arijitx/wav2vec2-xls-r-300m-bengali" \
 --wav_dir="./wavs" --text_file="script.txt" \
 --input_wavs_sr=48000 \
 --output_dir="./out_alignment" \
 --cuda
```
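A small illustrative snippet (not part of the project) for reading one of the produced alignment files back into Python; the path below just reuses the example id from the table above:

```python
# Parse a "char<TAB>score<TAB>start_ms<TAB>end_ms" alignment file into tuples.
def read_alignment(path):
    segments = []
    with open(path, encoding="utf8") as f:
        for line in f:
            char, score, start_ms, end_ms = line.strip().split("\t")
            segments.append((char, float(score), int(start_ms), int(end_ms)))
    return segments

print(read_alignment("./out_alignment/0000.txt")[:5])
```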
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/alignment.py
# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial. # The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html import argparse import os from dataclasses import dataclass import torch import torchaudio from tqdm import tqdm from transformers import AutoConfig, AutoModelForCTC, AutoProcessor class Wav2Vec2Aligner: def __init__(self, model_name, input_wavs_sr, cuda): self.cuda = cuda self.config = AutoConfig.from_pretrained(model_name) self.model = AutoModelForCTC.from_pretrained(model_name) self.model.eval() if self.cuda: self.model.to(device="cuda") self.processor = AutoProcessor.from_pretrained(model_name) self.resampler = torchaudio.transforms.Resample(input_wavs_sr, 16_000) blank_id = 0 vocab = list(self.processor.tokenizer.get_vocab().keys()) for i in range(len(vocab)): if vocab[i] == "[PAD]" or vocab[i] == "<pad>": blank_id = i print("Blank Token id [PAD]/<pad>", blank_id) self.blank_id = blank_id def speech_file_to_array_fn(self, wav_path): speech_array, sampling_rate = torchaudio.load(wav_path) speech = self.resampler(speech_array).squeeze().numpy() return speech def align_single_sample(self, item): blank_id = self.blank_id transcript = "|".join(item["sent"].split(" ")) if not os.path.isfile(item["wav_path"]): print(item["wav_path"], "not found in wavs directory") speech_array = self.speech_file_to_array_fn(item["wav_path"]) inputs = self.processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) if self.cuda: inputs = inputs.to(device="cuda") with torch.no_grad(): logits = self.model(inputs.input_values).logits # get the emission probability at frame level emissions = torch.log_softmax(logits, dim=-1) emission = emissions[0].cpu().detach() # get labels from vocab labels = ([""] + list(self.processor.tokenizer.get_vocab().keys()))[ :-1 ] # logits don't align with the tokenizer's vocab dictionary = {c: i for i, c in enumerate(labels)} tokens = [] for c in transcript: if c in dictionary: tokens.append(dictionary[c]) def get_trellis(emission, tokens, blank_id=0): """ Build a trellis matrix of shape (num_frames + 1, num_tokens + 1) that represents the probabilities of each source token being at a certain time step """ num_frames = emission.size(0) num_tokens = len(tokens) # Trellis has extra diemsions for both time axis and tokens. # The extra dim for tokens represents <SoS> (start-of-sentence) # The extra dim for time axis is for simplification of the code. trellis = torch.full((num_frames + 1, num_tokens + 1), -float("inf")) trellis[:, 0] = 0 for t in range(num_frames): trellis[t + 1, 1:] = torch.maximum( # Score for staying at the same token trellis[t, 1:] + emission[t, blank_id], # Score for changing to the next token trellis[t, :-1] + emission[t, tokens], ) return trellis trellis = get_trellis(emission, tokens, blank_id) @dataclass class Point: token_index: int time_index: int score: float def backtrack(trellis, emission, tokens, blank_id=0): """ Walk backwards from the last (sentence_token, time_step) pair to build the optimal sequence alignment path """ # Note: # j and t are indices for trellis, which has extra dimensions # for time and tokens at the beginning. # When referring to time frame index `T` in trellis, # the corresponding index in emission is `T-1`. # Similarly, when referring to token index `J` in trellis, # the corresponding index in transcript is `J-1`. 
j = trellis.size(1) - 1 t_start = torch.argmax(trellis[:, j]).item() path = [] for t in range(t_start, 0, -1): # 1. Figure out if the current position was stay or change # Note (again): # `emission[J-1]` is the emission at time frame `J` of trellis dimension. # Score for token staying the same from time frame J-1 to T. stayed = trellis[t - 1, j] + emission[t - 1, blank_id] # Score for token changing from C-1 at T-1 to J at T. changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] # 2. Store the path with frame-wise probability. prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() # Return token index and time index in non-trellis coordinate. path.append(Point(j - 1, t - 1, prob)) # 3. Update the token if changed > stayed: j -= 1 if j == 0: break else: raise ValueError("Failed to align") return path[::-1] path = backtrack(trellis, emission, tokens, blank_id) @dataclass class Segment: label: str start: int end: int score: float def __repr__(self): return f"{self.label}\t{self.score:4.2f}\t{self.start*20:5d}\t{self.end*20:5d}" @property def length(self): return self.end - self.start def merge_repeats(path): """ Merge repeated tokens into a single segment. Note: this shouldn't affect repeated characters from the original sentences (e.g. `ll` in `hello`) """ i1, i2 = 0, 0 segments = [] while i1 < len(path): while i2 < len(path) and path[i1].token_index == path[i2].token_index: i2 += 1 score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) segments.append( Segment( transcript[path[i1].token_index], path[i1].time_index, path[i2 - 1].time_index + 1, score, ) ) i1 = i2 return segments segments = merge_repeats(path) with open(item["out_path"], "w") as out_align: for seg in segments: out_align.write(str(seg) + "\n") def align_data(self, wav_dir, text_file, output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) # load text file lines = open(text_file, encoding="utf8").readlines() items = [] for line in lines: if len(line.strip().split("\t")) != 2: print("Script must be in format: 00001 this is my sentence") exit() wav_name, sentence = line.strip().split("\t") wav_path = os.path.join(wav_dir, wav_name + ".wav") out_path = os.path.join(output_dir, wav_name + ".txt") items.append({"sent": sentence, "wav_path": wav_path, "out_path": out_path}) print("Number of samples found in script file", len(items)) for item in tqdm(items): self.align_single_sample(item) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="arijitx/wav2vec2-xls-r-300m-bengali", help="wav2vec model name" ) parser.add_argument("--wav_dir", type=str, default="./wavs", help="directory containing wavs") parser.add_argument("--text_file", type=str, default="script.txt", help="file containing text") parser.add_argument("--input_wavs_sr", type=int, default=16000, help="sampling rate of input audios") parser.add_argument( "--output_dir", type=str, default="./out_alignment", help="output directory containing the alignment files" ) parser.add_argument("--cuda", action="store_true") args = parser.parse_args() aligner = Wav2Vec2Aligner(args.model_name, args.input_wavs_sr, args.cuda) aligner.align_data(args.wav_dir, args.text_file, args.output_dir) if __name__ == "__main__": main()
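A minimal sketch of driving the aligner from Python rather than via the CLI flags parsed in main(); the checkpoint name mirrors the script's default and the paths are illustrative (script.txt is expected to contain tab-separated id and sentence columns, as align_data assumes):

```python
# Illustrative sketch only: programmatic use of the Wav2Vec2Aligner defined above.
aligner = Wav2Vec2Aligner(
    model_name="arijitx/wav2vec2-xls-r-300m-bengali",  # default checkpoint from main()
    input_wavs_sr=16000,  # sampling rate of the input wavs; audio is resampled to 16 kHz internally
    cuda=False,
)
aligner.align_data(wav_dir="./wavs", text_file="script.txt", output_dir="./out_alignment")
```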
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/test_wav2vec2_deepspeed.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # XXX: we want transformers master here - in the absense of conftest manipulating sys.path: # hack it in for now: import sys from pathlib import Path git_repo_path = Path(__file__).resolve().parents[3] / "src" sys.path.insert(1, str(git_repo_path)) import dataclasses # noqa import io # noqa import itertools # noqa import json # noqa import os # noqa import unittest # noqa from copy import deepcopy # noqa from parameterized import parameterized # noqa from transformers import TrainingArguments, is_torch_available # noqa from transformers.deepspeed import is_deepspeed_available # noqa from transformers.file_utils import WEIGHTS_NAME # noqa from transformers.testing_utils import ( # noqa CaptureLogger, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_deepspeed, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed # noqa set_seed(42) models = {"base": "patrickvonplaten/wav2vec2_tiny_random", "robust": "patrickvonplaten/wav2vec2_tiny_random_robust"} ZERO2 = "zero2" ZERO3 = "zero3" stages = [ZERO2, ZERO3] def custom_name_func(func, param_num, param): # customize the test name generator function as we want both params to appear in the sub-test # name, as by default it shows only the first param param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args)) return f"{func.__name__}_{param_based_name}" # Cartesian-product of zero stages with models to test params = list(itertools.product(stages, models.keys())) @slow @require_deepspeed @require_torch_gpu class TestDeepSpeedWav2Vec2(TestCasePlus): @parameterized.expand(params, name_func=custom_name_func) def test_fp32_non_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=False, fp16=False, ) @require_torch_multi_gpu @parameterized.expand(params, name_func=custom_name_func) def test_fp32_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=True, fp16=False, ) @parameterized.expand(params, name_func=custom_name_func) def test_fp16_non_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=False, fp16=True, ) @require_torch_multi_gpu @parameterized.expand(params, name_func=custom_name_func) def test_fp16_distributed(self, stage, model): self.run_and_check( stage=stage, model=model, distributed=True, fp16=True, ) def do_checks(self, output_dir): # XXX: run_asr is premature and doesn't save any results # so all we check for now is that the process didn't fail pass # XXX: need to do better validation beyond just that the run was successful def run_and_check( self, stage: str, model: str, eval_steps: int = 10, distributed: bool = True, quality_checks: bool = True, fp16: bool = True, ): model_name = models[model] output_dir = self.run_trainer( stage=stage, model_name=model_name, eval_steps=eval_steps, num_train_epochs=1, 
distributed=distributed, fp16=fp16, ) self.do_checks(output_dir) return output_dir def run_trainer( self, stage: str, model_name: str, eval_steps: int = 10, num_train_epochs: int = 1, distributed: bool = True, fp16: bool = True, ): output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False) args = f""" --model_name_or_path {model_name} --dataset_name hf-internal-testing/librispeech_asr_dummy --dataset_config_name clean --train_split_name validation --validation_split_name validation --output_dir {output_dir} --num_train_epochs {str(num_train_epochs)} --per_device_train_batch_size 2 --per_device_eval_batch_size 2 --evaluation_strategy steps --learning_rate 5e-4 --warmup_steps 8 --orthography timit --preprocessing_num_workers 1 --group_by_length --freeze_feature_extractor --report_to none --save_steps 0 --eval_steps {eval_steps} --report_to none """.split() if fp16: args.extend(["--fp16"]) # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true, # hence the separate config files ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split() script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"] launcher = self.get_launcher(distributed) cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) return output_dir def get_launcher(self, distributed=False): # 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup # - it won't be able to handle that # 2. for now testing with just 2 gpus max (since some quality tests may give different # results with mode gpus because we use very little data) num_gpus = min(2, get_gpu_count()) if distributed else 1 return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/run_pretrain.py
#!/usr/bin/env python3 import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2ForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) max_gumbel_temperature: Optional[float] = field( default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} ) min_gumbel_temperature: Optional[float] = field( default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."} ) gumbel_temperature_decay: Optional[float] = field( default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} ) def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): logging_level = logging.INFO logger.setLevel(logging_level) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) validation_split_percentage: Optional[int] = field( default=1, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_duration_in_seconds: Optional[float] = field( default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) @dataclass class DataCollatorForWav2Vec2Pretraining: """ Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised pretraining. Args: model (:class:`~transformers.Wav2Vec2ForPreTraining`): The Wav2Vec2 model used for pretraining. The data collator needs to have access to config and ``_get_feat_extract_output_lengths`` function for correct padding. feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" model: Wav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None max_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format batch = self.feature_extractor.pad( features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) batch_size = batch["input_values"].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to( torch.long ) attention_mask = torch.zeros( (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device ) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[ (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1) ] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() # sample randomly masked indices batch["mask_time_indices"] = _compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, ) return batch class Wav2Vec2PreTrainer(Trainer): """ Subclassed :class:`~transformers.Trainer` for Wav2Vec2-like pretraining. Trainer can decay gumbel softmax temperature during training. """ def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs): super().__init__(*args, **kwargs) self.num_update_step = 0 self.max_gumbel_temp = max_gumbel_temp self.min_gumbel_temp = min_gumbel_temp self.gumbel_temp_decay = gumbel_temp_decay def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["mask_time_indices"]).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. 
Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp) ) return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() configure_logger(model_args, training_args) # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True ) def prepare_dataset(batch): # check that all files have the correct sampling rate batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) return batch # load audio files into numpy arrays vectorized_datasets = datasets.map( prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names ) # filter audio files that are too long vectorized_datasets = vectorized_datasets.filter( lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) ) def normalize(batch): return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) # normalize and transform to `BatchFeatures` vectorized_datasets = vectorized_datasets.map( normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob 
has to be 0.0 config = Wav2Vec2Config.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) model = Wav2Vec2ForPreTraining(config) data_collator = DataCollatorForWav2Vec2Pretraining(model=model, feature_extractor=feature_extractor) trainer = Wav2Vec2PreTrainer( model=model, data_collator=data_collator, args=training_args, train_dataset=vectorized_datasets["train"], eval_dataset=vectorized_datasets["validation"], tokenizer=feature_extractor, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
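As a quick illustration of the gumbel temperature schedule that Wav2Vec2PreTrainer.training_step applies (using the ModelArguments defaults of max 2.0, min 0.5 and decay 0.999995; the step counts below are arbitrary):

```python
# Gumbel softmax temperature after a given number of update steps, with the default hyperparameters.
max_gumbel_temp, min_gumbel_temp, gumbel_temp_decay = 2.0, 0.5, 0.999995

for step in (0, 50_000, 100_000, 200_000, 400_000):
    temp = max(max_gumbel_temp * gumbel_temp_decay**step, min_gumbel_temp)
    print(step, round(temp, 3))
```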
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_large_lv60_100.sh
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-large-lv60-100h" \ --num_train_epochs="30" \ --per_device_train_batch_size="16" \ --per_device_eval_batch_size="16" \ --evaluation_strategy="steps" \ --save_total_limit="3" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-large-lv60" \ --fp16 \ --dataset_name="librispeech_asr" \ --dataset_config_name="clean" \ --train_split_name="train.100" \ --preprocessing_num_workers="32" \ --group_by_length \ --freeze_feature_extractor
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/run_asr.py
#!/usr/bin/env python3 import logging import pathlib import re import sys from dataclasses import dataclass, field from typing import Any, Callable, Dict, List, Optional, Set, Union import datasets import librosa import numpy as np import torch from lang_trans import arabic from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2ForCTC, Wav2Vec2Processor, is_apex_available, trainer_utils, ) if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank): logging_level = logging.INFO logger.setLevel(logging_level) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) target_text_column: Optional[str] = field( default="text", metadata={"help": "Column in the dataset that contains label (target text). Defaults to 'text'"}, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"}, ) target_feature_extractor_sampling_rate: Optional[bool] = field( default=False, metadata={"help": "Resample loaded audio to target feature extractor's sampling rate or not."}, ) max_duration_in_seconds: Optional[float] = field( default=None, metadata={"help": "Filters out examples longer than specified. 
Defaults to no filtering."}, ) orthography: Optional[str] = field( default="librispeech", metadata={ "help": ( "Orthography used for normalization and tokenization: 'librispeech' (default), 'timit', or" " 'buckwalter'." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) @dataclass class Orthography: """ Orthography scheme used for text normalization and tokenization. Args: do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to accept lowercase input and lowercase the output when decoding. vocab_file (:obj:`str`, `optional`): File containing the vocabulary. word_delimiter_token (:obj:`str`, `optional`, defaults to :obj:`"|"`): The token used for delimiting words; it needs to be in the vocabulary. translation_table (:obj:`Dict[str, str]`, `optional`, defaults to :obj:`{}`): Table to use with `str.translate()` when preprocessing text (e.g., "-" -> " "). words_to_remove (:obj:`Set[str]`, `optional`, defaults to :obj:`set()`): Words to remove when preprocessing text (e.g., "sil"). untransliterator (:obj:`Callable[[str], str]`, `optional`): Function that untransliterates text back into native writing system. """ do_lower_case: bool = False vocab_file: Optional[str] = None word_delimiter_token: Optional[str] = "|" translation_table: Optional[Dict[str, str]] = field(default_factory=dict) words_to_remove: Optional[Set[str]] = field(default_factory=set) untransliterator: Optional[Callable[[str], str]] = None @classmethod def from_name(cls, name: str): if name == "librispeech": return cls() if name == "timit": return cls( do_lower_case=True, # break compounds like "quarter-century-old" and replace pauses "--" translation_table=str.maketrans({"-": " "}), ) if name == "buckwalter": translation_table = { "-": " ", # sometimes used to represent pauses "^": "v", # fixing "tha" in arabic_speech_corpus dataset } return cls( vocab_file=pathlib.Path(__file__).parent.joinpath("vocab/buckwalter.json"), word_delimiter_token="/", # "|" is Arabic letter alef with madda above translation_table=str.maketrans(translation_table), words_to_remove={"sil"}, # fixing "sil" in arabic_speech_corpus dataset untransliterator=arabic.buckwalter.untransliterate, ) raise ValueError(f"Unsupported orthography: '{name}'.") def preprocess_for_training(self, text: str) -> str: # TODO(elgeish) return a pipeline (e.g., from jiwer) instead? 
Or rely on branch predictor as is if len(self.translation_table) > 0: text = text.translate(self.translation_table) if len(self.words_to_remove) == 0: text = " ".join(text.split()) # clean up whitespaces else: text = " ".join(w for w in text.split() if w not in self.words_to_remove) # and clean up whilespaces return text def create_processor(self, model_args: ModelArguments) -> Wav2Vec2Processor: feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir ) if self.vocab_file: tokenizer = Wav2Vec2CTCTokenizer( self.vocab_file, cache_dir=model_args.cache_dir, do_lower_case=self.do_lower_case, word_delimiter_token=self.word_delimiter_token, ) else: tokenizer = Wav2Vec2CTCTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_lower_case=self.do_lower_case, word_delimiter_token=self.word_delimiter_token, ) return Wav2Vec2Processor(feature_extractor, tokenizer) @dataclass class DataCollatorCTCWithPadding: """ Data collator that will dynamically pad the inputs received. Args: processor (:class:`~transformers.Wav2Vec2Processor`) The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). max_length_labels (:obj:`int`, `optional`): Maximum length of the ``labels`` returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" processor: Wav2Vec2Processor padding: Union[bool, str] = True max_length: Optional[int] = None max_length_labels: Optional[int] = None pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lenghts and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] label_features = [{"input_ids": feature["labels"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) labels_batch = self.processor.pad( labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) batch["labels"] = labels return batch class CTCTrainer(Trainer): def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. """ model.train() inputs = self._prepare_inputs(inputs) if self.use_amp: with autocast(): loss = self.compute_loss(model, inputs) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: if model.module.config.ctc_loss_reduction == "mean": loss = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": loss = loss.sum() / (inputs["labels"] >= 0).sum() else: raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']") if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(loss).backward() elif self.use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(loss) else: loss.backward() return loss.detach() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() configure_logger(model_args, training_args) orthography = Orthography.from_name(data_args.orthography.lower()) processor = orthography.create_processor(model_args) model = Wav2Vec2ForCTC.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, vocab_size=len(processor.tokenizer), ) train_dataset = datasets.load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.train_split_name ) val_dataset = datasets.load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=data_args.validation_split_name ) wer_metric = datasets.load_metric("wer") target_sr = processor.feature_extractor.sampling_rate if data_args.target_feature_extractor_sampling_rate else None vocabulary_chars_str = "".join(t for t in processor.tokenizer.get_vocab().keys() if len(t) == 1) vocabulary_text_cleaner = re.compile( # remove characters not in vocabulary rf"[^\s{re.escape(vocabulary_chars_str)}]", # allow space in addition to chars in vocabulary flags=re.IGNORECASE if processor.tokenizer.do_lower_case else 0, ) text_updates = [] def prepare_example(example): # TODO(elgeish) make use of multiprocessing? example["speech"], example["sampling_rate"] = librosa.load(example[data_args.speech_file_column], sr=target_sr) if data_args.max_duration_in_seconds is not None: example["duration_in_seconds"] = len(example["speech"]) / example["sampling_rate"] # Normalize and clean up text; order matters! updated_text = orthography.preprocess_for_training(example[data_args.target_text_column]) updated_text = vocabulary_text_cleaner.sub("", updated_text) if updated_text != example[data_args.target_text_column]: text_updates.append((example[data_args.target_text_column], updated_text)) example[data_args.target_text_column] = updated_text return example train_dataset = train_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column]) val_dataset = val_dataset.map(prepare_example, remove_columns=[data_args.speech_file_column]) if data_args.max_duration_in_seconds is not None: def filter_by_max_duration(example): return example["duration_in_seconds"] <= data_args.max_duration_in_seconds old_train_size = len(train_dataset) old_val_size = len(val_dataset) train_dataset = train_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"]) val_dataset = val_dataset.filter(filter_by_max_duration, remove_columns=["duration_in_seconds"]) if len(train_dataset) > old_train_size: logger.warning( f"Filtered out {len(train_dataset) - old_train_size} train example(s) longer than" f" {data_args.max_duration_in_seconds} second(s)." ) if len(val_dataset) > old_val_size: logger.warning( f"Filtered out {len(val_dataset) - old_val_size} validation example(s) longer than" f" {data_args.max_duration_in_seconds} second(s)." 
) logger.info(f"Split sizes: {len(train_dataset)} train and {len(val_dataset)} validation.") logger.warning(f"Updated {len(text_updates)} transcript(s) using '{data_args.orthography}' orthography rules.") if logger.isEnabledFor(logging.DEBUG): for original_text, updated_text in text_updates: logger.debug(f'Updated text: "{original_text}" -> "{updated_text}"') text_updates = None def prepare_dataset(batch): # check that all files have the correct sampling rate assert ( len(set(batch["sampling_rate"])) == 1 ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}." processed_batch = processor( audio=batch["speech"], text=batch[data_args.target_text_column], sampling_rate=batch["sampling_rate"][0] ) batch.update(processed_batch) return batch train_dataset = train_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) val_dataset = val_dataset.map( prepare_dataset, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True) def compute_metrics(pred): pred_logits = pred.predictions pred_ids = np.argmax(pred_logits, axis=-1) pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id pred_str = processor.batch_decode(pred_ids) # we do not want to group tokens when computing the metrics label_str = processor.batch_decode(pred.label_ids, group_tokens=False) if logger.isEnabledFor(logging.DEBUG): for reference, predicted in zip(label_str, pred_str): logger.debug(f'reference: "{reference}"') logger.debug(f'predicted: "{predicted}"') if orthography.untransliterator is not None: logger.debug(f'reference (untransliterated): "{orthography.untransliterator(reference)}"') logger.debug(f'predicted (untransliterated): "{orthography.untransliterator(predicted)}"') wer = wer_metric.compute(predictions=pred_str, references=label_str) return {"wer": wer} if model_args.freeze_feature_extractor: model.freeze_feature_extractor() trainer = CTCTrainer( model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=processor.feature_extractor, ) trainer.train() if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_large_xlsr_53_arabic_speech_corpus.sh
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-large-xlsr-53-arabic-speech-corpus" \ --num_train_epochs="50" \ --per_device_train_batch_size="1" \ --per_device_eval_batch_size="1" \ --gradient_accumulation_steps="8" \ --evaluation_strategy="steps" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="elgeish/wav2vec2-large-xlsr-53-arabic" \ --fp16 \ --dataset_name="arabic_speech_corpus" \ --train_split_name="train" \ --validation_split_name="test" \ --max_duration_in_seconds="15" \ --orthography="buckwalter" \ --preprocessing_num_workers="$(nproc)" \ --group_by_length \ --freeze_feature_extractor \ --target_feature_extractor_sampling_rate \ --verbose_logging \
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_wav2vec2_xlsr_turkish.sh
#!/usr/bin/env bash python run_common_voice.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ --output_dir=./wav2vec2-large-xlsr-turkish-demo \ --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="16" \ --evaluation_strategy="steps" \ --learning_rate="3e-4" \ --warmup_steps="500" \ --fp16 \ --freeze_feature_extractor \ --save_steps="400" \ --eval_steps="400" \ --save_total_limit="3" \ --logging_steps="400" \ --group_by_length \ --feat_proj_dropout="0.0" \ --layerdrop="0.1" \ --gradient_checkpointing \ --do_train --do_eval
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/FINE_TUNE_XLSR_WAV2VEC2.md
# Fine-Tuning week of XLSR-Wav2Vec2 on 60 languages 🌍 Welcome to the fine-tuning week! The goal of this week is to have state-of-the-art automatic speech recognition (ASR) models in as many languages as possible. The fine-tuning week ends on Friday, the 26th March at midnight PST time. Participants are encouraged to fine-tune the pretrained [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) checkpoint on one or more of the 60 languages of [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets). Furthermore, it is very much appreciated if participants fine-tune XLSR-Wav2Vec2 on a language that is not included in the Common Voice dataset. All fine-tuned models uploaded until Friday, the 26th March midnight PST, will be taken into account for competition, and the best model per language will be awarded a prize if the best model performs reasonably well. The testing data to evaluate the models will be the official [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets) *`test data`* of version 6.1. Again, participants are very much encouraged to fine-tune XLSR-Wav2Vec2 on languages that are not found in the Common Voice dataset since those languages are even more likely to be underrepresented in the speech community. Each model fine-tuned on a language not found in Common Voice, will be evaluated by the Hugging Face team after Friday, the 26th March at midnight PST, and if the model performs reasonably well, the model receives a prize as well. For more information on which data can be used for training, how the models are evaluated exactly, and what type of data preprocessing can be used, please see ["Training and Evaluation Rules"](#training-and-evaluation-rules). **Please keep in mind:** The spirit of the fine-tuning week is to provide state-of-the-art speech recognition in as many languages as possible to the community! So while we encourage healthy competition between people/groups of the same language so that better results are obtained, it is extremely important that we help each other and share our insights with the whole team/community. What matters in the end is what has been achieved by the team as a whole during the fine-tuning week. That being said, we strongly encourage people to share tips & tricks on the forum or Slack, help each other when team members encounter bugs, and work in groups. To make it easier to share and help, forum threads have been created under the name {language} ASR: Fine-Tuning Wav2Vec2, e.g. here. It is very much possible that prizes will be given to groups of people instead of individuals. 
Also, don't hesitate to ask questions, propose improvements to the organization, to the material given to participants, etc...🤗 ## Table of Contents - [Organization of the fine tuning week](#organization-of-the-fine-tuning-week) - [How to fine tune XLSR Wav2Vec2](#how-to-fine-tune-xlsr-wav2vec2) - [Google colab setup](#google-colab-setup) - [Local machine](#local-machine) - [How to upload my trained checkpoint](#how-to-upload-my-trained-checkpoint) - [How to create the README](#how-to-create-the-readme) - [How to evaluate my trained checkpoint](#how-to-evaluate-my-trained-checkpoint) - [Rules of training and evaluation](#rules-of-training-and-evaluation) - [Tips and tricks](#tips-and-tricks) - [How to combine multiple datasests into one](#how-to-combine-multiple-datasets-into-one) - [How to effectively preprocess the data](#how-to-effectively-preprocess-the-data) - [How to efficiently preproces the data](#how-to-do-efficiently-load-datasets-with-limited-ram-and-hard-drive-space) - [How to do hyperparameter tuning](#how-to-do-hyperparameter-tuning) - [How to preprocess and evaluate character based languages](#how-to-preprocess-and-evaluate-character-based-languages) - [Further reading material](#further-reading-material) - [FAQ](#faq) ## Organization of the fine tuning week The week officially starts on 22.03.2021 and ends on 29.03.2021, but you are more than welcome to start fine-tuning models before the start date. General questions you might have, general problems you encounter, and general tips can be shared directly on the Slack channel (see [this post](https://discuss.huggingface.co/t/open-to-the-community-xlsr-wav2vec2-fine-tuning-week-for-low-resource-languages/4467) on how to be added to Slack). More language-specific questions or specific bugs should be posted on the [forum](https://discuss.huggingface.co/) (feel free to use already existing language-specific threads, *e.g.* [this one](https://discuss.huggingface.co/t/arabic-asr-fine-tuning-wav2vec2/4608) or open a new one if there is no thread for your language yet) or directly on [github](https://github.com/huggingface/transformers) if you think some code or document needs correction/improvement. Starting on Monday, the 22.03.2021, the Hugging Face team will try to provide an overview of currently trained models along with their evaluation results. All the necessary information on: - How to fine-tune the XLSR model - How to upload the model - How to share your evaluation results & training/eval script - What are the training/evaluation rules can be found in the sections below. If something is still unclear, feel free to drop a message in the Slack channel. ## How to fine tune XLSR Wav2Vec2 This chapter gives an in-detail explanation of how to fine-tune [Facebook's multi-lingual Wav2vec2](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on any language of the [Common Voice dataset](https://commonvoice.mozilla.org/en/datasets). Two possible setups can be used to fine-tune Wav2Vec2. The easiest setup is to simply use [google colab](https://colab.research.google.com/). It is possible to train the full model in a *free* google colab, but it is recommended to use google colab pro since it is more stable. The other option is to run a script locally. While this can be more difficult to set up, it also means that you have more control over the training run and probably access to better GPUs than you would have in a google colab. For small datasets, it is usually totally sufficient to train your model in a google colab. 
For larger and thus more memory-intensive datasets, it is probably better to fine-tune the model locally. For each option, we explain in detail how to fine-tune XLSR-Wav2Vec2 in the following. ### Google colab setup **Note**: Instead of reading the following section, you can simply watch [this](https://www.youtube.com/watch?v=UynYn2C3tI0&ab_channel=PatrickvonPlaten) video, where Patrick explains how to adapt the google colab for your specific language. **1.**: If you plan on training XLSR-Wav2Vec2 in a google colab, you should first make sure to have a valid gmail account. You can sign up for a gmail account [here](https://accounts.google.com/signup/v2/webcreateaccount?hl=en&flowName=GlifWebSignIn&flowEntry=SignUp). Having successfully signed up for gmail, you can now sign in to your account to make sure you are logged in when opening new tabs in your browser. **2.**: Next, head over to the official [Fine-Tune XLSR-Wav2Vec2 with 🤗 Transformes](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/Fine_Tune_XLSR_Wav2Vec2_on_Turkish_ASR_with_%F0%9F%A4%97_Transformers.ipynb) google colab. The first thing you should do is to make a copy of it - click `->File->Save a copy in Drive`. This should save a copy of the google colab in your google drive. **3.**: Now it is highly recommended to carefully read the google colab without running the cells yet. You should get an understanding of the model is trained and what you will have to change when training the model in a different language. Having done so, you can again head over to [Common Voice](https://commonvoice.mozilla.org/en/datasets) and pick a language you want to fine-tune [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on. Make sure you remember the language code (For each language, you can find it under the field "*Version*". It corresponds to **all characters before the first underscore**. *E.g.* for Greek it is *el*, while for Irish it is *ga-IE*. **4.**: Now you should replace the language code used for the demo of this colab, being *tr* for Turkish with the language code corresponding to the language you just chose in the **second** cell of the google colab. This will load the correct data for your language. **5.**: It is time to start running the google colab! Make sure that you have selected "GPU" as your runtime environment and you can start running the cells one-by-one. Make sure you attentively read the text between the cells to understand what is happening and to eventually correct the cells to improve the fine-tuning script for your language. Things you might want to improve/change: - Data loading. It is very much recommended to use more than just the official training data of the Common Voice dataset. If you find more data on the internet, feel free to use it! Check out the section ["How to combined multiple datasets into one"](#how-to-combine-multiple-datasets-into-one) - Data Processing. You should adapt the data processing to your specific language. In data processing, you should make the data more uniform so that it will be easier for the model to learn how to classify speech in your data. Here it can be really helpful to be proficient in the language to know what can be done to simplify the language without changing the meaning. Data processing methods include, but are not limited to: - Normalizing your data. Make sure all characters are lower-cased. - Remove typographical symbols and punctuation marks. 
See a list [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks). Be careful to not remove punctuation marks that can change the meaning of the sentence. *E.g.* you should not remove the single quotation mark `'` in English, as it would change the words `"it's"` to `"its"` which is a different word and has thus a different meaning. For more tips on data processing see ["How to effectively preprocess the data"](#how-to-effectively-preprocess-the-data") - Hyperparameter Tuning. Depending on the size of the data you should probably change the hyperparameters of the google colab. You can change any parameter you like. For more tips and tricks see ["How to do hyperparameter tuning for my language"](#how-to-do-hyperparameter-tuning-for-my-language) When running the google colab make sure that you uncomment the cell corresponding to mounting your google drive to the colab. This cell looks as follows: ```python # from google.colab import drive # drive.mount('/content/gdrive/') ``` Uncomment it, run it, and follow the instructions to mount your google drive. This way you can be sure that the model parameters and created tokenizer & feature extractor files are saved in **your** google drive. Also, make sure that you uncomment the cells corresponding to save the preprocessing files and trained model weights to your drive. Otherwise, you might lose a trained model if you google crashes. You should change the name of your model from `wav2vec2-large-xlsr-turkish-demo` to `wav2vec2-large-xlsr-{your_favorite_name}`. Those cells correspond to: ```python # processor.save_pretrained("/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo") ``` and the line: ```python output_dir="/content/gdrive/MyDrive/wav2vec2-large-xlsr-turkish-demo", ``` further below (which should already be uncommented). Having finished the training you should find the following files/folders under the folder `wav2vec2-large-xlsr-{your_favorite_name}` in your google drive: - `preprocessor_config.json` - the parameters of the feature extractor - `special_tokens_map.json` - the special token map of the tokenizer - `tokenizer_config.json` - the parameters of the tokenizer - `vocab.json` - the vocabulary of the tokenizer - `checkpoint-{...}/` - the saved checkpoints saved during training. Each checkpoint should contain the files: `config.json`, `optimizer.pt`, `pytorch_model.bin`, `scheduler.pt`, `training_args.bin`. The files `config.json` and `pytorch_model.bin` define your model. If you are happy with your training results it is time to upload your model! Download the following files to your local computer: **`preprocessor_config.json`, `special_tokens_map.json`, `tokenizer_config.json`, `vocab.json`, `config.json`, `pytorch_model.bin`**. Those files fully define a XLSR-Wav2Vec2 model checkpoint. Awesome you have successfully trained a XLSR-Wav2Vec2 model 😎. Now you can jump to the section ["How to upload my trained checkpoint"](#how-to-upload-my-trained-checkpoint) ### Local machine We have provided `run_common_voice.py` script to run fine-tuning on local machine. The script is similar to the colab but allows you to launch training using command line, save and continue training from previous checkpoints and launch training on multiple GPUs. For bigger datasets, we recommend to train Wav2Vec2 locally instead of in a google colab. 1. To begin with, we should clone transformers localy and install all the required packages. 
First, you need to clone the `transformers` repo with: ``` $ git clone https://github.com/huggingface/transformers.git ``` Second, head over to the `examples/research_projects/wav2vec2` directory, where the `run_common_voice.py` script is located. ``` $ cd transformers/examples/research_projects/wav2vec2 ``` Third, install the required packages. The packages are listed in the `requirements.txt` file and can be installed with ``` $ pip install -r requirements.txt ``` **Note**: Installing the latest version of `torchaudio` will also upgrade `torch` to it's latest stable version. If you are using specific version of `torch` then make sure to use the correct `torchaudio` version compatible with your version of `torch`. By default the `requirements.txt` will install the latest version of `torchaudio`. 2. Next, take a look at the `run_common_voice.py` script to get an understanding of how it works. In short the script does the following: - Load the given common voice dataset - Create vocab for the language - Load the model with given hyperparameters - Pre-process the dataset to input into the model - Run training - Run evaluation 3. The following examples show how you can launch fine-tuning for the common voice dataset. Here we will run the script on the *Turkish* Common Voice dataset for demonstration purposes. **To lanuch fine-tuninig on a single GPU:** ```bash python run_common_voice.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ # use this argument to specify the language code --output_dir=./wav2vec2-large-xlsr-turkish-demo \ --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="16" \ --learning_rate="3e-4" \ --warmup_steps="500" \ --evaluation_strategy="steps" \ --save_steps="400" \ --eval_steps="400" \ --logging_steps="400" \ --save_total_limit="3" \ --freeze_feature_extractor \ --feat_proj_dropout="0.0" \ --layerdrop="0.1" \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train --do_eval ``` **To lanuch fine-tuninig on multiple GPUs:** ```bash python -m torch.distributed.launch \ --nproc_per_node 4 run_common_voice.py \ --model_name_or_path="facebook/wav2vec2-large-xlsr-53" \ --dataset_config_name="tr" \ # use this argument to specify the language code --output_dir=./wav2vec2-large-xlsr-turkish-demo \ --overwrite_output_dir \ --num_train_epochs="5" \ --per_device_train_batch_size="16" \ --learning_rate="3e-4" \ --warmup_steps="500" \ --evaluation_strategy="steps" \ --save_steps="400" \ --eval_steps="400" \ --logging_steps="400" \ --save_total_limit="3" \ --freeze_feature_extractor \ --feat_proj_dropout="0.0" \ --layerdrop="0.1" \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train --do_eval ``` The above command will launch the training on 4 GPUs. Use the `--nproc_per_node` option to specify the number of GPUs. Once the training is finished, the model and checkpoints will be saved under the directory specified by the `--output_dir` argument. 4. The script also allows you to resume training from the last saved checkpoint. To resume training from last saved checkpoint remove the `--overwrite_output_dir` option and run the same command again. And to continue training from a specific checkpoint, keep the `--overwrite_output_dir` option and pass the path of the checkpoint as `--model_name_or_path`. As the script is based on the `Trainer` API, refer to the [Trainer docs](https://huggingface.co/transformers/main_classes/trainer.html) for more information about ``Trainer`` and ``TrainingArguments``. 
[OVH cloud](https://www.ovh.com/world/) has generously offered free compute for this sprint. Please refer to [this video](https://www.youtube.com/watch?v=2hlkWAESMk8&ab_channel=Databuzzword) to get started with OVH. ## How to upload my trained checkpoint To upload your trained checkpoint, you have to create a new model repository on the 🤗 model hub, from this page: https://huggingface.co/new > You can also follow the more in-depth instructions [here](https://huggingface.co/transformers/model_sharing.html) if needed. Having created your model repository on the hub, you should clone it locally: ```bash git lfs install git clone https://huggingface.co/username/your-model-name ``` Then and add the following files that fully define a XLSR-Wav2Vec2 checkpoint into the repository. You should have added the following files. - `preprocessor_config.json` - `special_tokens_map.json` - `tokenizer_config.json` - `vocab.json` - `config.json` - `pytorch_model.bin` Having added the above files, you should run the following to push files to your model repository. ``` git add . && git commit -m "Add model files" && git push ``` The next **very important** step is to create the model card. For people to use your fine-tuned model it is important to understand: - What kind of model is it? - What is your model useful for? - What data was your model trained on? - How well does your model perform? All these questions should be answered in a model card which is the first thing people see when visiting your model on the hub under `https://huggingface.co/{your_username}/{your_modelname}`. **Note**: It is extremely important that you add this model card or else we cannot find your model and thus cannot take the model into account for the final evaluation. ### How to create the readme The model card is written in markdown (`.md`) and should be added by simply clicking on the "Add model card" button which is found on the top right corner. You are encouraged to copy-paste the following template into your model card. **Make sure that** instead of copying the output of the markdown file you copy the **raw** version of the following part. To get the raw version of this file, simply click on the "`raw`" button on the top right corner of this file next to "`blame`" and copy everything below the marker. Make sure that you read and consequently remove all #TODO: statements from the model card. <======================Copy **raw** version from here========================= --- language: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. datasets: - common_voice #TODO: remove if you did not use the common voice dataset - TODO: add more datasets if you have used additional datasets. Make sure to use the exact same dataset name as the one found [here](https://huggingface.co/datasets). If the dataset can not be found in the official datasets, just give it a new name metrics: - wer tags: - audio - automatic-speech-recognition - speech - xlsr-fine-tuning-week license: apache-2.0 model-index: - name: {human_readable_name} #TODO: replace {human_readable_name} with a name of your model as it should appear on the leaderboard. It could be something like `Elgeish XLSR Wav2Vec2 Large 53` results: - task: name: Speech Recognition type: automatic-speech-recognition dataset: name: Common Voice {lang_id} #TODO: replace {lang_id} in your language code here. 
Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. type: common_voice args: {lang_id} #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. metrics: - name: Test WER type: wer value: {wer_result_on_test} #TODO (IMPORTANT): replace {wer_result_on_test} with the WER error rate you achieved on the common_voice test set. It should be in the format XX.XX (don't add the % sign here). **Please** remember to fill out this value after you evaluated your model, so that your model appears on the leaderboard. If you fill out this model card before evaluating your model, please remember to edit the model card afterward to fill in your value --- # Wav2Vec2-Large-XLSR-53-{language} #TODO: replace language with your {language}, *e.g.* French Fine-tuned [facebook/wav2vec2-large-xlsr-53](https://huggingface.co/facebook/wav2vec2-large-xlsr-53) on {language} using the [Common Voice](https://huggingface.co/datasets/common_voice), ... and ... dataset{s}. #TODO: replace {language} with your language, *e.g.* French and eventually add more datasets that were used and eventually remove common voice if model was not trained on common voice When using this model, make sure that your speech input is sampled at 16kHz. ## Usage The model can be used directly (without a language model) as follows: ```python import torch import torchaudio from datasets import load_dataset from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor test_dataset = load_dataset("common_voice", "{lang_id}", split="test[:2%]") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) inputs = processor(test_dataset[:2]["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values, attention_mask=inputs.attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) print("Prediction:", processor.batch_decode(predicted_ids)) print("Reference:", test_dataset[:2]["sentence"]) ``` ## Evaluation The model can be evaluated as follows on the {language} test data of Common Voice. # TODO: replace #TODO: replace language with your {language}, *e.g.* French ```python import torch import torchaudio from datasets import load_dataset, load_metric from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor import re test_dataset = load_dataset("common_voice", "{lang_id}", split="test") #TODO: replace {lang_id} in your language code here. Make sure the code is one of the *ISO codes* of [this](https://huggingface.co/languages) site. 
wer = load_metric("wer") processor = Wav2Vec2Processor.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` model = Wav2Vec2ForCTC.from_pretrained("{model_id}") #TODO: replace {model_id} with your model id. The model id consists of {your_username}/{your_modelname}, *e.g.* `elgeish/wav2vec2-large-xlsr-53-arabic` model.to("cuda") chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]' # TODO: adapt this list to include all special characters you removed from the data resampler = torchaudio.transforms.Resample(48_000, 16_000) # Preprocessing the datasets. # We need to read the aduio files as arrays def speech_file_to_array_fn(batch): batch["sentence"] = re.sub(chars_to_ignore_regex, '', batch["sentence"]).lower() speech_array, sampling_rate = torchaudio.load(batch["path"]) batch["speech"] = resampler(speech_array).squeeze().numpy() return batch test_dataset = test_dataset.map(speech_file_to_array_fn) # Preprocessing the datasets. # We need to read the aduio files as arrays def evaluate(batch): inputs = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True) with torch.no_grad(): logits = model(inputs.input_values.to("cuda"), attention_mask=inputs.attention_mask.to("cuda")).logits pred_ids = torch.argmax(logits, dim=-1) batch["pred_strings"] = processor.batch_decode(pred_ids) return batch result = test_dataset.map(evaluate, batched=True, batch_size=8) print("WER: {:2f}".format(100 * wer.compute(predictions=result["pred_strings"], references=result["sentence"]))) ``` **Test Result**: XX.XX % # TODO: write output of print here. IMPORTANT: Please remember to also replace {wer_result_on_test} at the top of with this value here. tags. ## Training The Common Voice `train`, `validation`, and ... datasets were used for training as well as ... and ... # TODO: adapt to state all the datasets that were used for training. The script used for training can be found [here](...) # TODO: fill in a link to your training script here. If you trained your model in a colab, simply fill in the link here. If you trained the model locally, it would be great if you could upload the training script on github and paste the link here. =======================To here===============================> Your model in then available under *huggingface.co/{your_username}/{your_chosen_xlsr-large_model_name}* for everybody to use 🎉. ## How to evaluate my trained checkpoint Having uploaded your model, you should now evaluate your model in a final step. This should be as simple as copying the evaluation code of your model card into a python script and running it. Make sure to note the final result on the model card **both** under the YAML tags at the very top **and** below your evaluation code under "Test Results". ## Rules of training and evaluation In this section, we will quickly go over what data is allowed to be used as training data, what kind of data preprocessing is allowed be used, and how the model should be evaluated. To make it very simple regarding the first point: **All data except the official common voice `test` data set can be used as training data**. For models trained in a language that is not included in Common Voice, the author of the model is responsible to leave a reasonable amount of data for evaluation. Second, the rules regarding the preprocessing are not that as straight-forward. 
It is allowed (and recommended) to normalize the data to only have lower-case characters. It is also allowed (and recommended) to remove typographical symbols and punctuation marks. A list of such symbols can *e.g.* be fonud [here](https://en.wikipedia.org/wiki/List_of_typographical_symbols_and_punctuation_marks) - however here we already must be careful. We should **not** remove a symbol that would change the meaning of the words, *e.g.* in English, we should not remove the single quotation mark `'` since it would change the meaning of the word `"it's"` to `"its"` which would then be incorrect. So the golden rule here is to not remove any characters that could change the meaning of a word into another word. This is not always obvious and should be given some consideration. As another example, it is fine to remove the "Hypen-minus" sign "`-`" since it doesn't change the meaninng of a word to another one. *E.g.* "`fine-tuning`" would be changed to "`finetuning`" which has still the same meaning. Since those choices are not always obvious when in doubt feel free to ask on Slack or even better post on the forum, as was done, *e.g.* [here](https://discuss.huggingface.co/t/spanish-asr-fine-tuning-wav2vec2/4586). ## Tips and tricks This section summarizes a couple of tips and tricks across various topics. It will continously be updated during the week. ### How to combine multiple datasets into one Check out [this](https://discuss.huggingface.co/t/how-to-combine-local-data-files-with-an-official-dataset/4685) post. ### How to effectively preprocess the data ### How to do efficiently load datasets with limited ram and hard drive space Check out [this](https://discuss.huggingface.co/t/german-asr-fine-tuning-wav2vec2/4558/8?u=patrickvonplaten) post. ### How to do hyperparameter tuning ### How to preprocess and evaluate character based languages ## Further reading material It is recommended that take some time to read up on how Wav2vec2 works in theory. Getting a better understanding of the theory and the inner mechanisms of the model often helps when fine-tuning the model. **However**, if you don't like reading blog posts/papers, don't worry - it is by no means necessary to go through the theory to fine-tune Wav2Vec2 on your language of choice. If you are interested in learning more about the model though, here are a couple of resources that are important to better understand Wav2Vec2: - [Facebook's Wav2Vec2 blog post](https://ai.facebook.com/blog/wav2vec-state-of-the-art-speech-recognition-through-self-supervision/) - [Official Wav2Vec2 paper](https://arxiv.org/abs/2006.11477) - [Official XLSR Wav2vec2 paper](https://arxiv.org/pdf/2006.13979.pdf) - [Hugging Face Blog](https://huggingface.co/blog/fine-tune-xlsr-wav2vec2) - [How does CTC (Connectionist Temporal Classification) work](https://distill.pub/2017/ctc/) It helps to have a good understanding of the following points: - How was XLSR-Wav2Vec2 pretrained? -> Feature vectors were masked and had to be predicted by the model; very similar in spirit to masked language model of BERT. - What parts of XLSR-Wav2Vec2 are responsible for what? What is the feature extractor part used for? -> extract feature vectors from the 1D raw audio waveform; What is the transformer part doing? -> mapping feature vectors to contextualized feature vectors; ... - What part of the model needs to be fine-tuned? -> The pretrained model **does not** include a language head to classify the contextualized features to letters. 
This is randomly initialized when loading the pretrained checkpoint and has to be fine-tuned. Also, note that the authors recommend to **not** further fine-tune the feature extractor. - What data was XLSR-Wav2Vec2 pretrained on? The checkpoint we will use for further fine-tuning was pretrained on **53** languages. - What languages are considered to be similar by XLSR-Wav2Vec2? In the official [XLSR Wav2Vec2 paper](https://arxiv.org/pdf/2006.13979.pdf), the authors show nicely which languages share a common contextualized latent space. It might be useful for you to extend your training data with data of other languages that are considered to be very similar by the model (or you). ## FAQ - Can a participant fine-tune models for more than one language? Yes! A participant can fine-tune models in as many languages as she/he likes. - Can a participant use extra data (apart from the common voice data)? Yes! All data except the official common voice `test data` can be used for training. If a participant wants to train a model on a language that is not part of Common Voice (which is very much encouraged!), the participant should make sure that some test data is held out to make sure the model is not overfitting. - Can we fine-tune for high-resource languages? Yes! We do not really recommend fine-tuning models in English, since there are already so many fine-tuned speech recognition models in English; however, it is very much appreciated if participants want to fine-tune models in other "high-resource" languages, such as French, Spanish, or German. For such cases, one probably needs to train locally and might have to apply tricks such as lazy data loading (check the ["Lazy data loading"](#how-to-do-lazy-data-loading) section for more details).
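As a purely illustrative complement to the preprocessing rules above, here is a minimal sketch of a sentence-normalization step for Common Voice data. It is not the official recipe: adapt `chars_to_ignore_regex` to your language and keep any characters (such as the English apostrophe) whose removal would change word meaning.

```python
import re

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“]'  # example set only; extend or shrink per language

def normalize_sentence(batch):
    batch["sentence"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower()
    return batch

# e.g. test_dataset = test_dataset.map(normalize_sentence)
```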
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero2.json
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 2, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "find_unused_parameters": true, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/ds_config_wav2vec2_zero3.json
{ "fp16": { "enabled": "auto", "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 16, "hysteresis": 2, "min_loss_scale": 1 }, "optimizer": { "type": "AdamW", "params": { "lr": "auto", "betas": "auto", "eps": "auto", "weight_decay": "auto" } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": "auto", "warmup_max_lr": "auto", "warmup_num_steps": "auto" } }, "zero_optimization": { "stage": 3, "offload_optimizer": { "device": "cpu", "pin_memory": true }, "offload_param": { "device": "cpu", "pin_memory": true }, "overlap_comm": true, "contiguous_gradients": true, "sub_group_size": 1e9, "reduce_bucket_size": "auto", "stage3_prefetch_bucket_size": "auto", "stage3_param_persistence_threshold": "auto", "stage3_max_live_parameters": 1e9, "stage3_max_reuse_distance": 1e9, "stage3_gather_16bit_weights_on_model_save": true }, "gradient_accumulation_steps": "auto", "gradient_clipping": "auto", "steps_per_print": 2000, "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false }
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_large_lv60_timit_asr.sh
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-large-lv60-timit-asr" \ --num_train_epochs="30" \ --per_device_train_batch_size="2" \ --per_device_eval_batch_size="2" \ --gradient_accumulation_steps="4" \ --evaluation_strategy="steps" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-large-lv60" \ --fp16 \ --dataset_name="timit_asr" \ --train_split_name="train" \ --validation_split_name="test" \ --orthography="timit" \ --preprocessing_num_workers="$(nproc)" \ --group_by_length \ --freeze_feature_extractor \ --verbose_logging \
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/run_alignment.sh
#!/usr/bin/env bash python alignment.py \ --model_name="arijitx/wav2vec2-xls-r-300m-bengali" \ --wav_dir="./wavs" \ --text_file="script.txt" \ --input_wavs_sr=48000 \ --output_dir="./out_alignment" \ --cuda
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/wav2vec2/finetune_base_100.sh
#!/usr/bin/env bash python run_asr.py \ --output_dir="./wav2vec2-base-100h" \ --num_train_epochs="30" \ --per_device_train_batch_size="32" \ --per_device_eval_batch_size="32" \ --evaluation_strategy="steps" \ --save_total_limit="3" \ --save_steps="500" \ --eval_steps="100" \ --logging_steps="50" \ --learning_rate="5e-4" \ --warmup_steps="3000" \ --model_name_or_path="facebook/wav2vec2-base" \ --fp16 \ --dataset_name="librispeech_asr" \ --dataset_config_name="clean" \ --train_split_name="train.100" \ --preprocessing_num_workers="32" \ --group_by_length \ --freeze_feature_extractor
0
hf_public_repos/transformers/examples/research_projects/wav2vec2
hf_public_repos/transformers/examples/research_projects/wav2vec2/vocab/buckwalter.json
{ "<pad>": 0, "<s>": 1, "</s>": 2, "<unk>": 3, "/": 4, "'": 5, "|": 6, ">": 7, "&": 8, "<": 9, "}": 10, "A": 11, "b": 12, "p": 13, "t": 14, "v": 15, "j": 16, "H": 17, "x": 18, "d": 19, "*": 20, "r": 21, "z": 22, "s": 23, "$": 24, "S": 25, "D": 26, "T": 27, "Z": 28, "E": 29, "g": 30, "_": 31, "f": 32, "q": 33, "k": 34, "l": 35, "m": 36, "n": 37, "h": 38, "w": 39, "Y": 40, "y": 41, "F": 42, "N": 43, "K": 44, "a": 45, "u": 46, "i": 47, "~": 48, "o": 49, "`": 50, "{": 51, "P": 52, "J": 53, "V": 54, "G": 55 }
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/decision_transformer/requirements.txt
absl-py==1.0.0 aiohttp==3.8.5 aiosignal==1.2.0 alembic==1.7.7 appdirs==1.4.4 APScheduler==3.9.1 arrow==1.2.2 asttokens==2.0.5 astunparse==1.6.3 async-timeout==4.0.2 attrs==21.4.0 audioread==2.1.9 autopage==0.5.0 backcall==0.2.0 backoff==1.11.1 backports.zoneinfo==0.2.1 binaryornot==0.4.4 black==22.1.0 boto3==1.16.34 botocore==1.19.63 Brotli==1.0.9 cachetools==5.0.0 certifi==2023.7.22 cffi==1.15.0 chardet==4.0.0 charset-normalizer==2.0.12 chex==0.1.1 click==8.0.4 cliff==3.10.1 clldutils==3.11.1 cloudpickle==2.0.0 cmaes==0.8.2 cmd2==2.4.0 codecarbon==1.2.0 colorlog==6.6.0 cookiecutter==2.1.1 cryptography==41.0.2 csvw==2.0.0 cycler==0.11.0 Cython==0.29.28 dash==2.3.0 dash-bootstrap-components==1.0.3 dash-core-components==2.0.0 dash-html-components==2.0.0 dash-table==5.0.0 datasets==2.0.0 decorator==5.1.1 Deprecated==1.2.13 dill==0.3.4 dlinfo==1.2.1 dm-tree==0.1.6 docker==4.4.4 execnet==1.9.0 executing==0.8.3 faiss-cpu==1.7.2 fasteners==0.17.3 filelock==3.6.0 fire==0.4.0 flake8==4.0.1 Flask==2.3.2 Flask-Compress==1.11 flatbuffers==2.0 flax==0.4.0 fonttools==4.31.1 frozenlist==1.3.0 fsspec==2022.2.0 fugashi==1.1.2 gast==0.5.3 gitdb==4.0.9 GitPython==3.1.30 glfw==2.5.1 google-auth==2.6.2 google-auth-oauthlib==0.4.6 google-pasta==0.2.0 greenlet==1.1.2 grpcio==1.44.0 gym==0.23.1 gym-notices==0.0.6 h5py==3.6.0 huggingface-hub==0.4.0 hypothesis==6.39.4 idna==3.3 imageio==2.16.1 importlib-metadata==4.11.3 importlib-resources==5.4.0 iniconfig==1.1.1 ipadic==1.0.0 ipython==8.10.0 isodate==0.6.1 isort==5.10.1 itsdangerous==2.1.1 jax==0.3.4 jaxlib==0.3.2 jedi==0.18.1 Jinja2==2.11.3 jinja2-time==0.2.0 jmespath==0.10.0 joblib==1.2.0 jsonschema==4.4.0 keras==2.8.0 Keras-Preprocessing==1.1.2 kiwisolver==1.4.0 kubernetes==12.0.1 libclang==13.0.0 librosa==0.9.1 llvmlite==0.38.0 Mako==1.2.2 Markdown==3.3.6 MarkupSafe==1.1.1 matplotlib==3.5.1 matplotlib-inline==0.1.3 mccabe==0.6.1 msgpack==1.0.3 mujoco-py==2.1.2.14 multidict==6.0.2 multiprocess==0.70.12.2 mypy-extensions==0.4.3 nltk==3.7 numba==0.55.1 numpy==1.22.3 oauthlib==3.2.2 onnx==1.13.0 onnxconverter-common==1.9.0 opt-einsum==3.3.0 optax==0.1.1 optuna==2.10.0 packaging==21.3 pandas==1.4.1 parameterized==0.8.1 parso==0.8.3 pathspec==0.9.0 pbr==5.8.1 pexpect==4.8.0 phonemizer==3.0.1 pickleshare==0.7.5 Pillow==9.3.0 Pint==0.16.1 plac==1.3.4 platformdirs==2.5.1 plotly==5.6.0 pluggy==1.0.0 pooch==1.6.0 portalocker==2.0.0 poyo==0.5.0 prettytable==3.2.0 prompt-toolkit==3.0.28 protobuf==3.19.5 psutil==5.9.0 ptyprocess==0.7.0 pure-eval==0.2.2 py==1.11.0 py-cpuinfo==8.0.0 pyarrow==7.0.0 pyasn1==0.4.8 pyasn1-modules==0.2.8 pycodestyle==2.8.0 pycparser==2.21 pyctcdecode==0.3.0 pyflakes==2.4.0 Pygments==2.15.0 pygtrie==2.4.2 pynvml==11.4.1 pyOpenSSL==22.0.0 pyparsing==3.0.7 pyperclip==1.8.2 pypng==0.0.21 pyrsistent==0.18.1 pytest==7.1.1 pytest-forked==1.4.0 pytest-timeout==2.1.0 pytest-xdist==2.5.0 python-dateutil==2.8.2 python-slugify==6.1.1 pytz==2022.1 pytz-deprecation-shim==0.1.0.post0 PyYAML==6.0 ray==1.11.0 redis==4.5.4 regex==2022.3.15 requests==2.31.0 requests-oauthlib==1.3.1 resampy==0.2.2 responses==0.18.0 rfc3986==1.5.0 rouge-score==0.0.4 rsa==4.8 s3transfer==0.3.7 sacrebleu==1.5.1 sacremoses==0.0.49 scikit-learn==1.0.2 scipy==1.8.0 segments==2.2.0 sentencepiece==0.1.96 sigopt==8.2.0 six==1.16.0 smmap==5.0.0 sortedcontainers==2.4.0 SoundFile==0.10.3.post1 SQLAlchemy==1.4.32 stack-data==0.2.0 stevedore==3.5.0 tabulate==0.8.9 tenacity==8.0.1 tensorboard==2.8.0 tensorboard-data-server==0.6.1 tensorboard-plugin-wit==1.8.1 tensorboardX==2.5 tensorflow==2.8.1 
tensorflow-io-gcs-filesystem==0.24.0 termcolor==1.1.0 text-unidecode==1.3 tf-estimator-nightly==2.8.0.dev2021122109 tf2onnx==1.9.3 threadpoolctl==3.1.0 timeout-decorator==0.5.0 timm==0.5.4 tokenizers==0.11.6 tomli==2.0.1 toolz==0.11.2 torch==1.11.0 torchaudio==0.11.0 torchvision==0.12.0 tqdm==4.63.0 traitlets==5.1.1 -e git+git@github.com:edbeeching/transformers.git@77b90113ca0a0e4058b046796c874bdc98f1da61#egg=transformers typing-extensions==4.1.1 tzdata==2022.1 tzlocal==4.1 unidic==1.1.0 unidic-lite==1.0.8 uritemplate==4.1.1 urllib3==1.26.9 wasabi==0.9.0 wcwidth==0.2.5 websocket-client==1.3.1 Werkzeug==2.2.3 wrapt==1.14.0 xxhash==3.0.0 yarl==1.7.2 zipp==3.7.0
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/decision_transformer/run_decision_transformer.py
import gym import numpy as np import torch from mujoco_py import GlfwContext from transformers import DecisionTransformerModel GlfwContext(offscreen=True) # Create a window to init GLFW. def get_action(model, states, actions, rewards, returns_to_go, timesteps): # we don't care about the past rewards in this model states = states.reshape(1, -1, model.config.state_dim) actions = actions.reshape(1, -1, model.config.act_dim) returns_to_go = returns_to_go.reshape(1, -1, 1) timesteps = timesteps.reshape(1, -1) if model.config.max_length is not None: states = states[:, -model.config.max_length :] actions = actions[:, -model.config.max_length :] returns_to_go = returns_to_go[:, -model.config.max_length :] timesteps = timesteps[:, -model.config.max_length :] # pad all tokens to sequence length attention_mask = torch.cat( [torch.zeros(model.config.max_length - states.shape[1]), torch.ones(states.shape[1])] ) attention_mask = attention_mask.to(dtype=torch.long, device=states.device).reshape(1, -1) states = torch.cat( [ torch.zeros( (states.shape[0], model.config.max_length - states.shape[1], model.config.state_dim), device=states.device, ), states, ], dim=1, ).to(dtype=torch.float32) actions = torch.cat( [ torch.zeros( (actions.shape[0], model.config.max_length - actions.shape[1], model.config.act_dim), device=actions.device, ), actions, ], dim=1, ).to(dtype=torch.float32) returns_to_go = torch.cat( [ torch.zeros( (returns_to_go.shape[0], model.config.max_length - returns_to_go.shape[1], 1), device=returns_to_go.device, ), returns_to_go, ], dim=1, ).to(dtype=torch.float32) timesteps = torch.cat( [ torch.zeros( (timesteps.shape[0], model.config.max_length - timesteps.shape[1]), device=timesteps.device ), timesteps, ], dim=1, ).to(dtype=torch.long) else: attention_mask = None _, action_preds, _ = model( states=states, actions=actions, rewards=rewards, returns_to_go=returns_to_go, timesteps=timesteps, attention_mask=attention_mask, return_dict=False, ) return action_preds[0, -1] # build the environment env = gym.make("Hopper-v3") state_dim = env.observation_space.shape[0] act_dim = env.action_space.shape[0] max_ep_len = 1000 device = "cuda" scale = 1000.0 # normalization for rewards/returns TARGET_RETURN = 3600 / scale # evaluation conditioning targets, 3600 is reasonable from the paper LINK state_mean = np.array( [ 1.311279, -0.08469521, -0.5382719, -0.07201576, 0.04932366, 2.1066856, -0.15017354, 0.00878345, -0.2848186, -0.18540096, -0.28461286, ] ) state_std = np.array( [ 0.17790751, 0.05444621, 0.21297139, 0.14530419, 0.6124444, 0.85174465, 1.4515252, 0.6751696, 1.536239, 1.6160746, 5.6072536, ] ) state_mean = torch.from_numpy(state_mean).to(device=device) state_std = torch.from_numpy(state_std).to(device=device) # Create the decision transformer model model = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-medium") model = model.to(device) model.eval() for ep in range(10): episode_return, episode_length = 0, 0 state = env.reset() target_return = torch.tensor(TARGET_RETURN, device=device, dtype=torch.float32).reshape(1, 1) states = torch.from_numpy(state).reshape(1, state_dim).to(device=device, dtype=torch.float32) actions = torch.zeros((0, act_dim), device=device, dtype=torch.float32) rewards = torch.zeros(0, device=device, dtype=torch.float32) timesteps = torch.tensor(0, device=device, dtype=torch.long).reshape(1, 1) for t in range(max_ep_len): env.render() # add padding actions = torch.cat([actions, torch.zeros((1, act_dim), device=device)], dim=0) rewards = 
torch.cat([rewards, torch.zeros(1, device=device)]) action = get_action( model, (states.to(dtype=torch.float32) - state_mean) / state_std, actions.to(dtype=torch.float32), rewards.to(dtype=torch.float32), target_return.to(dtype=torch.float32), timesteps.to(dtype=torch.long), ) actions[-1] = action action = action.detach().cpu().numpy() state, reward, done, _ = env.step(action) cur_state = torch.from_numpy(state).to(device=device).reshape(1, state_dim) states = torch.cat([states, cur_state], dim=0) rewards[-1] = reward pred_return = target_return[0, -1] - (reward / scale) target_return = torch.cat([target_return, pred_return.reshape(1, 1)], dim=1) timesteps = torch.cat([timesteps, torch.ones((1, 1), device=device, dtype=torch.long) * (t + 1)], dim=1) episode_return += reward episode_length += 1 if done: break
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/layoutlmv3/requirements.txt
datasets seqeval pillow
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/layoutlmv3/README.md
<!--- Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Token classification with LayoutLMv3 (PyTorch version) This directory contains a script, `run_funsd_cord.py`, that can be used to fine-tune (or evaluate) LayoutLMv3 on form understanding datasets, such as [FUNSD](https://guillaumejaume.github.io/FUNSD/) and [CORD](https://github.com/clovaai/cord). The script `run_funsd_cord.py` leverages the 🤗 Datasets library and the Trainer API. You can easily customize it to your needs. ## Fine-tuning on FUNSD Fine-tuning LayoutLMv3 for token classification on [FUNSD](https://guillaumejaume.github.io/FUNSD/) can be done as follows: ```bash python run_funsd_cord.py \ --model_name_or_path microsoft/layoutlmv3-base \ --dataset_name funsd \ --output_dir layoutlmv3-test \ --do_train \ --do_eval \ --max_steps 1000 \ --evaluation_strategy steps \ --eval_steps 100 \ --learning_rate 1e-5 \ --load_best_model_at_end \ --metric_for_best_model "eval_f1" \ --push_to_hub \ --push_to_hub_model_id layoutlmv3-finetuned-funsd ``` 👀 The resulting model can be found here: https://huggingface.co/nielsr/layoutlmv3-finetuned-funsd. By specifying the `push_to_hub` flag, the model gets uploaded automatically to the hub (regularly), together with a model card, which includes metrics such as precision, recall and F1. Note that you can easily update the model card, as it's just a README file of the respective repo on the hub. There's also the "Training metrics" [tab](https://huggingface.co/nielsr/layoutlmv3-finetuned-funsd/tensorboard), which shows Tensorboard logs over the course of training. Pretty neat, huh? ## Fine-tuning on CORD Fine-tuning LayoutLMv3 for token classification on [CORD](https://github.com/clovaai/cord) can be done as follows: ```bash python run_funsd_cord.py \ --model_name_or_path microsoft/layoutlmv3-base \ --dataset_name cord \ --output_dir layoutlmv3-test \ --do_train \ --do_eval \ --max_steps 1000 \ --evaluation_strategy steps \ --eval_steps 100 \ --learning_rate 5e-5 \ --load_best_model_at_end \ --metric_for_best_model "eval_f1" \ --push_to_hub \ --push_to_hub_model_id layoutlmv3-finetuned-cord ``` 👀 The resulting model can be found here: https://huggingface.co/nielsr/layoutlmv3-finetuned-cord. Note that a model card gets generated automatically in case you specify the `push_to_hub` flag.
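## Inference with a fine-tuned checkpoint

Once a fine-tuned checkpoint has been pushed to the hub (or saved locally), you can load it back for inference. The snippet below is a minimal, illustrative sketch: the image path, words and normalized (0-1000) bounding boxes are made-up placeholder values, and in practice they would come from an OCR step or from the dataset itself.

```python
from PIL import Image
from transformers import AutoModelForTokenClassification, AutoProcessor

# The processor was saved with apply_ocr=False, so words and boxes are provided manually.
processor = AutoProcessor.from_pretrained("nielsr/layoutlmv3-finetuned-funsd", apply_ocr=False)
model = AutoModelForTokenClassification.from_pretrained("nielsr/layoutlmv3-finetuned-funsd")

image = Image.open("form.png").convert("RGB")    # placeholder image path
words = ["Name:", "John"]                        # placeholder OCR words
boxes = [[74, 60, 160, 80], [170, 60, 230, 80]]  # placeholder boxes, normalized to 0-1000

encoding = processor(image, words, boxes=boxes, return_tensors="pt")
logits = model(**encoding).logits
predictions = logits.argmax(-1).squeeze().tolist()
tokens = processor.tokenizer.convert_ids_to_tokens(encoding["input_ids"][0])
# This prints one label per (sub)token; aligning labels back to words is omitted for brevity.
print([(token, model.config.id2label[pred]) for token, pred in zip(tokens, predictions)])
```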
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/layoutlmv3/run_funsd_cord.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning LayoutLMv3 for token classification on FUNSD or CORD. """ # You can also adapt this script on your own token classification task and datasets. Pointers for this are left as # comments. import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import numpy as np from datasets import ClassLabel, load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoProcessor, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.data.data_collator import default_data_collator from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.19.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/token-classification/requirements.txt") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="microsoft/layoutlmv3-base", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) processor_name: Optional[str] = field( default=None, metadata={"help": "Name or path to the processor files if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default="nielsr/funsd-layoutlmv3", metadata={"help": "The name of the dataset to use (via the datasets library)."}, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) text_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of text to input in the file (a csv or JSON file)."} ) label_column_name: Optional[str] = field( default=None, metadata={"help": "The column name of label to input in the file (a csv or JSON file)."} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_seq_length: int = field( default=512, metadata={ "help": ( "The maximum total input sequence length after tokenization. If set, sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) label_all_tokens: bool = field( default=False, metadata={ "help": ( "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." ) }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." self.task_name = self.task_name.lower() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) log_level = training_args.get_process_log_level() logger.setLevel(log_level) datasets.utils.logging.set_verbosity(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name == "funsd": # Downloading and loading a dataset from the hub. dataset = load_dataset( "nielsr/funsd-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) elif data_args.dataset_name == "cord": # Downloading and loading a dataset from the hub. dataset = load_dataset( "nielsr/cord-layoutlmv3", data_args.dataset_config_name, cache_dir=model_args.cache_dir, use_auth_token=True if model_args.use_auth_token else None, ) else: raise ValueError("This script only supports either FUNSD or CORD out-of-the-box.") if training_args.do_train: column_names = dataset["train"].column_names features = dataset["train"].features else: column_names = dataset["test"].column_names features = dataset["test"].features image_column_name = "image" text_column_name = "words" if "words" in column_names else "tokens" boxes_column_name = "bboxes" label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. 
def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list # If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere. # Otherwise, we have to get the list of labels manually. if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. id2label = dict(enumerate(label_list)) label2id = {v: k for k, v in enumerate(label_list)} else: label_list = get_label_list(dataset["train"][label_column_name]) id2label = dict(enumerate(label_list)) label2id = {v: k for k, v in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) processor = AutoProcessor.from_pretrained( model_args.processor_name if model_args.processor_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, apply_ocr=False, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Set the correspondences label/ID inside the model config model.config.label2id = label2id model.config.id2label = id2label # Preprocessing the dataset # The processor does everything for us (prepare the image using LayoutLMv3ImageProcessor # and prepare the words, boxes and word-level labels using LayoutLMv3TokenizerFast) def prepare_examples(examples): images = examples[image_column_name] words = examples[text_column_name] boxes = examples[boxes_column_name] word_labels = examples[label_column_name] encoding = processor( images, words, boxes=boxes, word_labels=word_labels, truncation=True, padding="max_length", max_length=data_args.max_seq_length, ) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") train_dataset = dataset["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) with training_args.main_process_first(desc="train dataset map pre-processing"): train_dataset = train_dataset.map( prepare_examples, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: validation_name = "test" if validation_name not in dataset: raise ValueError("--do_eval requires a validation dataset") eval_dataset = dataset[validation_name] if data_args.max_eval_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_eval_samples)) with training_args.main_process_first(desc="validation dataset map pre-processing"): eval_dataset = eval_dataset.map( prepare_examples,
batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in dataset: raise ValueError("--do_predict requires a test dataset") predict_dataset = dataset["test"] if data_args.max_predict_samples is not None: max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples) predict_dataset = predict_dataset.select(range(max_predict_samples)) with training_args.main_process_first(desc="prediction dataset map pre-processing"): predict_dataset = predict_dataset.map( prepare_examples, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor, data_collator=default_data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict") predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # Save predictions
output_predictions_file = os.path.join(training_args.output_dir, "predictions.txt") if trainer.is_world_process_zero(): with open(output_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "token-classification"} if data_args.dataset_name is not None: kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: kwargs["dataset_args"] = data_args.dataset_config_name kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mlm_wwm/requirements.txt
datasets >= 1.1.3 sentencepiece != 0.1.92 protobuf ltp
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mlm_wwm/README.md
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ## Whole Word Mask Language Model These scripts leverage the 🤗 Datasets library and the Trainer API. You can easily customize them to your needs if you need extra processing on your datasets. The following examples will run on datasets hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation. We give examples of both below. The BERT authors released a new version of BERT using Whole Word Masking in May 2019. Instead of masking randomly selected tokens (which may be part of words), they mask randomly selected words (masking all the tokens corresponding to that word). This technique has been refined for Chinese in [this paper](https://arxiv.org/abs/1906.08101). To fine-tune a model using whole word masking, use the following script: ```bash python run_mlm_wwm.py \ --model_name_or_path roberta-base \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --do_train \ --do_eval \ --output_dir /tmp/test-mlm-wwm ``` For Chinese models, we need to generate a reference file (which requires the ltp library), because the text is tokenized at the character level. **Q :** Why a reference file? **A :** Suppose we have a Chinese sentence like: `我喜欢你` The original Chinese-BERT will tokenize it as `['我','喜','欢','你']` (character level). But `喜欢` is a whole word. As a proxy for whole word masking, we need a result like `['我','喜','##欢','你']`, so we need a reference file to tell the model which positions in the original BERT tokenization should be prefixed with `##`. **Q :** Why LTP ? **A :** Because the best-known Chinese WWM BERT is [Chinese-BERT-wwm](https://github.com/ymcui/Chinese-BERT-wwm) by HIT. It works well on many Chinese tasks like CLUE (Chinese GLUE). They use LTP, so if we want to fine-tune their model, we need LTP. You could run the following: ```bash export TRAIN_FILE=/path/to/train/file export LTP_RESOURCE=/path/to/ltp/tokenizer export BERT_RESOURCE=/path/to/bert/tokenizer export SAVE_PATH=/path/to/data/ref.txt python run_chinese_ref.py \ --file_name=$TRAIN_FILE \ --ltp=$LTP_RESOURCE \ --bert=$BERT_RESOURCE \ --save_path=$SAVE_PATH ``` Then you can run the script like this: ```bash export TRAIN_FILE=/path/to/train/file export VALIDATION_FILE=/path/to/validation/file export TRAIN_REF_FILE=/path/to/train/chinese_ref/file export VALIDATION_REF_FILE=/path/to/validation/chinese_ref/file export OUTPUT_DIR=/tmp/test-mlm-wwm python run_mlm_wwm.py \ --model_name_or_path roberta-base \ --train_file $TRAIN_FILE \ --validation_file $VALIDATION_FILE \ --train_ref_file $TRAIN_REF_FILE \ --validation_ref_file $VALIDATION_REF_FILE \ --do_train \ --do_eval \ --output_dir $OUTPUT_DIR ``` **Note1:** On TPU, you should use the flag `--pad_to_max_length` to make sure all your batches have the same length. **Note2:** If you have any questions or something goes wrong when running this code, don't hesitate to ping @wlhgtc.
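To make the reference file format more concrete, here is a small, illustrative sketch of how a single line of `ref.txt` relates to the `我喜欢你` example above and how it is consumed by `DataCollatorForWholeWordMask`. The checkpoint name `bert-base-chinese` and the index `3` are assumptions for this illustration; the exact positions depend on the tokenizer you actually use.

```python
import json

from transformers import BertTokenizer, DataCollatorForWholeWordMask

tokenizer = BertTokenizer.from_pretrained("bert-base-chinese")  # assumed checkpoint for illustration

# Each line of ref.txt is a JSON list with the positions (in the BERT tokenization, which starts
# with [CLS]) of characters that continue a whole word. For "我喜欢你", the tokens are
# [CLS] 我 喜 欢 你 [SEP]; "欢" at position 3 completes the word "喜欢", so the line is "[3]".
ref_line = "[3]"
chinese_ref = json.loads(ref_line)

encoding = tokenizer("我喜欢你")
features = [{"input_ids": encoding["input_ids"], "chinese_ref": chinese_ref}]

# With the reference attached, the collator masks "喜" and "欢" together instead of independently.
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
batch = data_collator(features)
print(batch["input_ids"], batch["labels"])
```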
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mlm_wwm/run_chinese_ref.py
import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def _is_chinese_char(cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def is_chinese(word: str): # word like '180' or '身高' or '神' for char in word: char = ord(char) if not _is_chinese_char(char): return 0 return 1 def get_chinese_word(tokens: List[str]): word_set = set() for token in tokens: chinese_word = len(token) > 1 and is_chinese(token) if chinese_word: word_set.add(token) word_list = list(word_set) return word_list def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): if not chinese_word_set: return bert_tokens max_word_len = max([len(w) for w in chinese_word_set]) bert_word = bert_tokens start, end = 0, len(bert_word) while start < end: single_word = True if is_chinese(bert_word[start]): l = min(end - start, max_word_len) for i in range(l, 1, -1): whole_word = "".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1, start + i): bert_word[j] = "##" + bert_word[j] start = start + i single_word = False break if single_word: start += 1 return bert_word def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): ltp_res = [] for i in range(0, len(lines), 100): res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) bert_res = [] for i in range(0, len(lines), 100): res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) bert_res.extend(res["input_ids"]) assert len(bert_res) == len(lines) ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) input_tokens.append(token) input_tokens = add_sub_symbol(input_tokens, chinese_word) ref_id = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(input_tokens): if token[:2] == "##": clean_token = token[2:] # save chinese tokens' pos if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): ref_id.append(i) ref_ids.append(ref_id) assert len(ref_ids) == len(bert_res) return ref_ids def main(args): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name, "r", encoding="utf-8") as f: data = f.readlines() data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiter like '\u2029' ltp_tokenizer = LTP(args.ltp) # faster in GPU device bert_tokenizer = BertTokenizer.from_pretrained(args.bert) ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer) with open(args.save_path, "w", encoding="utf-8") as f: data = [json.dumps(ref) + "\n" for ref in ref_ids] f.writelines(data) if __name__ == "__main__": parser = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", required=False, type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", required=False, type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path", ) parser.add_argument( "--bert", required=False, type=str, default="./resources/robert", help="resources for Bert tokenizer", ) parser.add_argument( "--save_path", required=False, type=str, default="./resources/ref.txt", help="path to save res", ) args = parser.parse_args() main(args)
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mlm_wwm/run_mlm_wwm.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) if data_args.validation_ref_file is not None: tokenized_datasets["validation"] = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer has_ref = data_args.train_ref_file or data_args.validation_ref_file if has_ref: training_args.remove_unused_columns = False # Data collator # This one will take care of randomly masking the tokens. 
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, "train_results.txt") if trainer.is_world_process_zero(): with open(output_train_file, "w") as writer: logger.info("***** Train results *****") for key, value in sorted(train_result.metrics.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in sorted(results.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/jax-projects/README.md
# Flax/JAX community week 🤗 Welcome to the Flax/JAX community week! The goal of this week is to make compute-intensive NLP and CV projects (like pre-training BERT, GPT2, CLIP, ViT) practicable for a wider audience of engineers and researchers. To do so, we will try to teach **you** how to effectively use JAX/Flax on TPU and help you to complete a fun NLP and/or CV project in JAX/Flax during the community week. Free access to a TPUv3-8 will kindly be provided by the Google Cloud team! In this document, we list all the important information that you will need during the Flax/JAX community week. Don't forget to sign up [here](https://forms.gle/tVGPhjKXyEsSgUcs8)! ## Table of Contents - [Organization](#organization) - [Important dates](#important-dates) - [Communication](#communication) - [Projects](#projects) - [How to propose](#how-to-propose-a-project) - [How to form a team](#how-to-form-a-team-around-a-project) - [Tips & Tricks for project](#tips-on-how-to-organize-the-project) - [How to install flax, jax, optax, transformers, datasets](#how-to-install-relevant-libraries) - [Quickstart Flax/JAX](#quickstart-flax-and-jax) - [Quickstart Flax/JAX in 🤗 Transformers](#quickstart-flax-and-jax-in-transformers) - [Flax design philosophy in 🤗 Transformers](#flax-design-philosophy-in-transformers) - [How to use flax models & scripts](#how-to-use-flax-models-and-example-scripts) - [Talks](#talks) - [How to use the 🤗 Hub for training](#how-to-use-the-hub-for-collaboration) - [How to setup TPU VM](#how-to-setup-tpu-vm) - [How to build a demo](#how-to-build-a-demo) - [Using the Hugging Face Widgets](#using-the-hugging-face-widgets) - [Using a Streamlit demo](#using-a-streamlit-demo) - [Using a Gradio demo](#using-a-gradio-demo) - [Project evaluation](#project-evaluation) - [General Tips & Tricks](#general-tips-and-tricks) - [FAQ](#faq) ## Organization Participants can propose ideas for an interesting NLP and/or CV project. Teams of 3 to 5 will then be formed around the most promising and interesting projects. Make sure to read through the [Projects](#projects) section on how to propose projects, comment on other participants' project ideas, and create a team. To help each team successfully finish their project, we have organized talks by leading scientists and engineers from Google, Hugging Face, and the open-source NLP & CV community. The talks will take place before the community week from June 30th to July 2nd. Make sure to attend the talks to get the most out of your participation! Check out the [Talks](#talks) section to get an overview of the talks, including the speaker and the time of the talk. Each team is then given **free access to a TPUv3-8 VM** from July 7th to July 14th. In addition, we will provide training examples in JAX/Flax for a variety of NLP and Vision models to kick-start your project. During the week, we'll make sure to answer any questions you might have about JAX/Flax and Transformers and help each team as much as possible to complete their project! At the end of the community week, each team should submit a demo of their project. All demonstrations will be evaluated by a jury and the top-3 demos will be awarded a prize. Check out the [How to submit a demo](#how-to-submit-a-demo) section for more information and suggestions on how to submit your project. ## Important dates - **23.06.** Official announcement of the community week. Make sure to sign-up in [this google form](https://forms.gle/tVGPhjKXyEsSgUcs8). - **23.06. 
- 30.06.** Participants will be added to an internal Slack channel. Project ideas can be proposed here and groups of 3-5 are formed. Read this document for more information. - **30.06.** Release of all relevant training scripts in JAX/Flax as well as other documents on how to set up a TPU, how to use the training scripts, how to submit a demo, tips & tricks for JAX/Flax, tips & tricks for efficient use of the hub. - **30.06. - 2.07.** Talks about JAX/Flax, TPU, Transformers, Computer Vision & NLP will be held. - **7.07.** Start of the community week! Access to TPUv3-8 will be given to each team. - **7.07. - 14.07.** The Hugging Face & JAX/Flax & Cloud team will be available for any questions, problems the teams might run into. - **15.07.** Access to TPU is deactivated and community week officially ends. - **16.07.** Deadline for each team to submit a demo. ## Communication All important communication will take place in an internal Slack channel, called `#flax-jax-community-week`. Important announcements of the Hugging Face, Flax/JAX, and Google Cloud team will be posted there. Such announcements include general information about the community week (Dates, Rules, ...), release of relevant training scripts (Flax/JAX example scripts for NLP and Vision), release of other important documents (How to access the TPU), etc. The Slack channel will also be the central place for participants to post about their results, share their learning experiences, ask questions, etc. For issues with Flax/JAX, Transformers, Datasets or for questions that are specific to your project we would be **very happy** if you could use the following public repositories and forums: - Flax: [Issues](https://github.com/google/flax/issues), [Questions](https://github.com/google/flax/discussions) - JAX: [Issues](https://github.com/google/jax/issues), [Questions](https://github.com/google/jax/discussions) - 🤗 Transformers: [Issues](https://github.com/huggingface/transformers/issues), [Questions](https://discuss.huggingface.co/c/transformers/9) - 🤗 Datasets: [Issues](https://github.com/huggingface/datasets/issues), [Questions](https://discuss.huggingface.co/c/datasets/10) - Project specific questions: [Forum](https://discuss.huggingface.co/c/flax-jax-projects/22) - TPU related questions: [TODO]() Please do **not** post the complete issue/project-specific question in the Slack channel, but instead a link to your issue/question that we will try to answer as soon as possible. This way, we make sure that the everybody in the community can benefit from your questions - even after the community week - and that the same question is not answered twice. To be invited to the Slack channel, please make sure you have signed up [on the Google form](https://forms.gle/tVGPhjKXyEsSgUcs8). **Note**: If you have signed up on the google form, but you are not in the Slack channel, please leave a message on [(TODO) the official forum announcement]( ) and ping `@Suzana` and `@patrickvonplaten`. ## Projects During the first week after the community week announcement, **23.06. - 30.06.**, teams will be formed around the most promising and interesting project ideas. Each team can consist of 2 to 10 participants. Projects can be accessed [here](https://discuss.huggingface.co/c/flax-jax-projects/22). All officially defined projects can be seen [here](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing). ### How to propose a project Some default project ideas are given by the organizers. 
**However, we strongly encourage participants to submit their own project ideas!** Check out the [HOW_TO_PROPOSE_PROJECT.md](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md) for more information on how to propose a new project. ### How to form a team around a project You can check out all existing projects ideas on the forum under [Flax/JAX projects category](https://discuss.huggingface.co/c/flax-jax-projects/22). Make sure to quickly check out each project idea and leave a ❤️ if you like an idea. Feel free to leave comments, suggestions for improvement, or questions about more details directly on the discussion thread. If you have found the project that you ❤️ the most, leave a message "I would like to join this project" on the discussion thread. We strongly advise you to also shortly state who you are, which time zone you are in and why you would like to work on this project, how you can contribute to the project and what your vision is for the project. For projects that see a lot of interest and for which enough participants have expressed interest in joining, an official team will be created by the organizers. One of the organizers (`@Suzana`, `@valhalla`, `@osanseviero`, `@patrickvonplaten`) will leave a message "For this project the team: `<team_name>`, `<team_members>` , is officially created" on the thread and note down the teams on [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing). Once created, the team can start refining their project: - What is the goal of the project? *E.g.*, Present a language model that writes poetry in Russian. - What model will we use? *E.g.*, FlaxGPT2 - What data will we use? *E.g.* Russian dataset of OSCAR & publicly available book on poetry - Should we use a pre-trained model or train a model from scratch? E.g. Train a model from scratch - What training scripts do we need? *E.g.* `transformers/examples/flax/run_clm_flax.py` can be used - What kind of demo would we like to present? E.g. Text-generation API of the 🤗 Hub in combination with a Streamlit demo that lets the user generate a poem of a given length - How will the work be divided? *E.g.* Team member 1 works on data preprocessing, Team member 2 works on adapting the Flax script, ... We highly recommend that each team discusses all relevant ideas for their project directly on the forum thread. This way valuable learning experiences are shared and accessible by the whole community in the future. Additionally, the organizers, other participants, or anybody in the community really can read through your discussions and leave comments/tips for improvement. Obviously, you can also create private chats, ... to discuss more sensitive topics, etc. **Important**: - For project ideas that see a lot of interest, we are more than happy to create more than one team. - Participants are welcome to join multiple teams, even though we encourage them to only work on a single project. - Under special circumstances, participants can change/create new teams. Please note that we would like to keep this the exception. If however, you would like to change/leave existing teams, please leave a post on the project's thread where you ping the corresponding organizer that created the group. - It is often easy to propose/join a project that is done in your native language. 
Feel free to reach out to existing [language-specific groups](https://discuss.huggingface.co/c/languages-at-hugging-face/15) to look for community members that might be interested in joining your project.

## Tips on how to organize the project

This section gives you some tips on how to most efficiently & effectively work as a team to achieve your goal. It is by no means a strict recipe to follow, but rather a collection of tips from the 🤗 team. Once your team is defined, you can start working on the project as soon as possible.

### Communication

At first, it is always useful to get to know each other and to set up a means of communication. While we recommend that all technical aspects of the work be discussed directly on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22) under your project thread, it can be very helpful to have a more direct way of communicating, *e.g.* in a channel. For this we have created a discord that you can access [here](https://discord.com/channels/858019234139602994/858019234139602997). This discord will not be managed by anybody and is just there so that you can communicate more effectively with your team members. Feel free to create a new channel for you and your team where you can discuss everything. If you and your team have already set up other ways of communicating, it is absolutely not required to make use of the discord. However, we do recommend that each team sets up some kind of channel or group for quick discussions.

### Project definition

In the very beginning, you should make sure your project is well-defined and that everybody in the team understands the goal of the project and the work that needs to be done in order to achieve the goal. A well-defined project:

- has defined the task on which the model will be trained
- has defined the model that will be trained
- has defined the datasets that will be used for training
- has defined the type of training scripts that need to be written
- has defined the desired outcome of the project
- has defined the workflows

By "has defined" we don't mean that the corresponding code already has to be written and ready to be used, but that everybody in the team is on the same page on what type of model, data and training script should be used.

To give an example, a well-defined project would be the following:

- task: summarization
- model: [t5-small](https://huggingface.co/t5-small)
- dataset: [CNN/Daily mail](https://huggingface.co/datasets/cnn_dailymail)
- training script: [run_summarization_flax.py](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py)
- outcome: t5 model that can summarize news
- workflow: adapt `run_summarization_flax.py` to work with `t5-small`.

This example is very easy and not the most interesting project since a `t5-small` summarization model already exists for CNN/Daily mail and pretty much no code has to be written. A well-defined project does not need to have the dataset be part of the `datasets` library and the training script already be pre-written; however, it should be clear how the desired dataset can be accessed and how the training script can be written.

It is also important to have a clear plan regarding the workflow. Usually, the data processing is done in a first step. Once the data is in a format that the model can work with, the training script can be written, etc. These steps should be more detailed once the team has a clearly defined project. It can be helpful to set deadlines for each step.
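To keep the project definition concrete, it can also help to verify early on that the chosen dataset is actually accessible. The following is a minimal sketch for the summarization example above (the 1% slice is only meant to keep the inspection quick; note that non-streaming loading still downloads the full archive once):

```python
from datasets import load_dataset

# Load a tiny slice of the CNN/Daily Mail training split as a quick accessibility check.
sample = load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")

# Inspect one example to confirm the fields the training script will need.
print(sample[0]["article"][:200])
print(sample[0]["highlights"])
```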
### Workload division

To effectively work as a team, it is crucial to divide the workload among everybody. Some team members will be more motivated and experienced than others and some team members simply want to participate to learn more and cannot contribute that much to the team. This is totally fine! One cannot expect everybody in the team to have the same level of experience and time/motivation during the community week.

Therefore, being honest about one's expected involvement is crucial so that the workload can be divided accordingly. If someone doesn't think their tasks are feasible, they should let the team know early on so that someone else can take care of them!

It is recommended that the motivated and experienced team members take the lead in dividing the work and are ready to take over the tasks of another team member if necessary.

The workload can often be divided according to:

- data preprocessing (load the data and preprocess data in the correct format)
- data tokenization / data collator (process data samples into tokens or images)
- model configuration (writing the code that defines the model)
- model forward pass (make sure input / output work correctly)
- loss function (define the loss function)
- putting the pieces together in a training script

Many of the steps above require other steps to be finished, so it often makes sense to use dummy data in the expected format to start, *e.g.*, with the model forward pass before the data preprocessing is done.

### Expectations

It is also very important to stay realistic with the scope of your project. Each team has access to a TPUv3-8 for only *ca.* 10 days, so it's important to keep the scope of the project reasonable. While we do want each team to work on interesting projects, each team should make sure that the project goals can be achieved within the provided compute time on TPU. For instance, pretraining an 11-billion-parameter T5 model is not really a realistic task with just 10 days of TPUv3-8 compute. Also, it might be difficult to finish a project where the whole modeling, dataset and training code has to be written from scratch.

Having defined your project, feel free to reach out on Slack or the forum for feedback from the organizers. We can surely give you our opinion on whether the project is feasible and what can be done to improve it.

### Other tips

Here is a collection of some more tips:

- We strongly recommend working as publicly and collaboratively as possible during the week so that other teams and the organizers can best help you. This includes publishing important discussions on the forum and making use of the [🤗 hub](http://huggingface.co/) to have version control for your models and training logs.
- When debugging, it is important that the debugging cycle is kept as short as possible to be able to effectively debug. *E.g.* if there is a problem with your training script, you should run it with just a couple of hundred examples and not the whole dataset. This can be done by either making use of [datasets streaming](https://huggingface.co/docs/datasets/master/dataset_streaming.html?highlight=streaming) (a short streaming sketch is shown right after this list) or by selecting just the first X number of data samples after loading:

```python
datasets["train"] = datasets["train"].select(range(1000))
```
- Ask for help. If you are stuck, use the public Slack channel or the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22) to ask for help.
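As referenced in the debugging tip above, here is a minimal streaming sketch (it uses the same OSCAR config as the installation check further below; the number of samples is arbitrary):

```python
from datasets import load_dataset

# Stream the dataset so that nothing has to be downloaded up-front.
dataset = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)

# Only keep a handful of examples to keep the debugging cycle short.
debug_samples = list(dataset.take(100))
print(debug_samples[0]["text"][:200])
```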
## How to install relevant libraries In the following we will explain how to install all relevant libraries on your local computer and on TPU VM. It is recommended to install all relevant libraries both on your local machine and on the TPU virtual machine. This way, quick prototyping and testing can be done on your local machine and the actual training can be done on the TPU VM. ### Local computer The following libraries are required to train a JAX/Flax model with 🤗 Transformers and 🤗 Datasets: - [JAX](https://github.com/google/jax/) - [Flax](https://github.com/google/flax) - [Optax](https://github.com/deepmind/optax) - [Transformers](https://github.com/huggingface/transformers) - [Datasets](https://github.com/huggingface/datasets) You should install the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going to use and activate it. You should be able to run the command: ```bash python3 -m venv <your-venv-name> ``` You can activate your venv by running ```bash source ~/<your-venv-name>/bin/activate ``` We strongly recommend to make use of the provided JAX/Flax examples scripts in [transformers/examples/flax](https://github.com/huggingface/transformers/tree/main/examples/flax) even if you want to train a JAX/Flax model of another github repository that is not integrated into 🤗 Transformers. In all likelihood, you will need to adapt one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows. Doing so will allow you to share your fork of the Transformers library with your team members so that the team effectively works on the same code base. It will also automatically install the newest versions of `flax`, `jax` and `optax`. 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone https://github.com/<your Github handle>/transformers.git $ cd transformers $ git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: ```bash $ git checkout -b a-descriptive-name-for-my-project ``` 4. Set up a flax environment by running the following command in a virtual environment: ```bash $ pip install -e ".[flax]" ``` (If transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag.) If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets` library. Running this command will automatically install `flax`, `jax` and `optax`. Next, you should also install the 🤗 Datasets library. We strongly recommend installing the library from source to profit from the most current additions during the community week. 
Simply run the following steps:

```
$ cd ~/
$ git clone https://github.com/huggingface/datasets.git
$ cd datasets
$ pip install -e ".[streaming]"
```

If you plan on contributing a specific dataset during the community week, please fork the datasets repository and follow the instructions [here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request).

To verify that all libraries are correctly installed, you can run the following command. It assumes that both `transformers` and `datasets` were installed from main - otherwise datasets streaming will not work correctly.

```python
from transformers import FlaxRobertaModel, RobertaTokenizerFast
from datasets import load_dataset
import jax

dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)

dummy_input = next(iter(dataset))["text"]

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
input_ids = tokenizer(dummy_input, return_tensors="np").input_ids[:, :10]

model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown")

# run a forward pass, should return an object `FlaxBaseModelOutputWithPooling`
model(input_ids)
```

### TPU VM

**VERY IMPORTANT** - Only one process can access the TPU cores at a time. This means that if multiple team members try to connect to the TPU cores at the same time, errors such as:

```
libtpu.so already in used by another process. Not attempting to load libtpu.so in this process.
```

are thrown. We therefore recommend that every team member creates their own virtual environment, but only one person should run the heavy training processes. Also, please take turns when setting up the TPUv3-8 so that everybody can verify that JAX is correctly installed.

The following libraries are required to train a JAX/Flax model with 🤗 Transformers and 🤗 Datasets on TPU VM:

- [JAX](https://github.com/google/jax/)
- [Flax](https://github.com/google/flax)
- [Optax](https://github.com/deepmind/optax)
- [Transformers](https://github.com/huggingface/transformers)
- [Datasets](https://github.com/huggingface/datasets)

You should install the above libraries in a [virtual environment](https://docs.python.org/3/library/venv.html). If you're unfamiliar with Python virtual environments, check out the [user guide](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/). Create a virtual environment with the version of Python you're going to use and activate it. You should be able to run the command:

```bash
python3 -m venv <your-venv-name>
```

If this doesn't work, you might first have to install `python3-venv`. You can do this as follows:

```bash
sudo apt-get install python3-venv
```

You can activate your venv by running

```bash
source ~/<your-venv-name>/bin/activate
```

Next you should install JAX's TPU version on the TPU VM by running the following command:

```
$ pip install requests
```

and then:

```
$ pip install "jax[tpu]>=0.2.16" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
```

**Note**: Running this command might actually throw an error, such as:
```
Building wheel for jax (setup.py) ...
error ERROR: Command errored out with exit status 1: command: /home/patrick/patrick/bin/python3 -u -c 'import sys, setuptools, tokenize; sys.argv[0] = '"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"'; __file__='"'"'/tmp/pip-install-lwseckn1/jax/setup.py'"'"';f=getattr(tokenize, '"'"'open'"'"', open)(__file__);code=f.read().replace('"'"'\r\n'"'"', '"'"'\n'"'"');f.close();exec(compile(code, __file__, '"'"'exec'"'"'))' bdist_wheel -d /tmp/pip-wheel-pydotzlo cwd: /tmp/pip-install-lwseckn1/jax/ Complete output (6 lines): usage: setup.py [global_opts] cmd1 [cmd1_opts] [cmd2 [cmd2_opts] ...] or: setup.py --help [cmd1 cmd2 ...] or: setup.py --help-commands or: setup.py cmd --help error: invalid command 'bdist_wheel' ---------------------------------------- ERROR: Failed building wheel for jax ``` Jax should have been installed correctly nevertheless. To verify that JAX was correctly installed, you can run the following command: ```python import jax jax.device_count() ``` This should display the number of TPU cores, which should be 8 on a TPUv3-8 VM. We strongly recommend to make use of the provided JAX/Flax examples scripts in [transformers/examples/flax](https://github.com/huggingface/transformers/tree/main/examples/flax) even if you want to train a JAX/Flax model of another github repository that is not integrated into 🤗 Transformers. In all likelihood, you will need to adapt one of the example scripts, so we recommend forking and cloning the 🤗 Transformers repository as follows. Doing so will allow you to share your fork of the Transformers library with your team members so that the team effectively works on the same code base. It will also automatically install the newest versions of `flax`, `jax` and `optax`. 1. Fork the [repository](https://github.com/huggingface/transformers) by clicking on the 'Fork' button on the repository's page. This creates a copy of the code under your GitHub user account. 2. Clone your fork to your local disk, and add the base repository as a remote: ```bash $ git clone https://github.com/<your Github handle>/transformers.git $ cd transformers $ git remote add upstream https://github.com/huggingface/transformers.git ``` 3. Create a new branch to hold your development changes. This is especially useful to share code changes with your team: ```bash $ git checkout -b a-descriptive-name-for-my-project ``` 4. Set up a flax environment by running the following command in a virtual environment: ```bash $ pip install -e ".[flax]" ``` (If transformers was already installed in the virtual environment, remove it with `pip uninstall transformers` before reinstalling it in editable mode with the `-e` flag.) If you have already cloned that repo, you might need to `git pull` to get the most recent changes in the `datasets` library. Running this command will automatically install `flax`, `jax` and `optax`. Next, you should also install the 🤗 Datasets library. We strongly recommend installing the library from source to profit from the most current additions during the community week. Simply run the following steps: ``` $ cd ~/ $ git clone https://github.com/huggingface/datasets.git $ cd datasets $ pip install -e ".[streaming]" ``` If you plan on contributing a specific dataset during the community week, please fork the datasets repository and follow the instructions [here](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-create-a-pull-request). To verify that all libraries are correctly installed, you can run the following command. 
It assumes that both `transformers` and `datasets` were installed from main - otherwise datasets streaming will not work correctly. ```python from transformers import FlaxRobertaModel, RobertaTokenizerFast from datasets import load_dataset import jax dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True) dummy_input = next(iter(dataset))["text"] tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") input_ids = tokenizer(dummy_input, return_tensors="np").input_ids[:, :10] model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown") # run a forward pass, should return an object `FlaxBaseModelOutputWithPooling` model(input_ids) ``` ## Quickstart flax and jax [JAX](https://jax.readthedocs.io/en/latest/index.html) is Autograd and XLA, brought together for high-performance numerical computing and machine learning research. It provides composable transformations of Python+NumPy programs: differentiate, vectorize, parallelize, Just-In-Time compile to GPU/TPU, and more. A great place for getting started with JAX is the [JAX 101 Tutorial](https://jax.readthedocs.io/en/latest/jax-101/index.html). [Flax](https://flax.readthedocs.io/en/latest/index.html) is a high-performance neural network library designed for flexibility built on top of JAX. It aims to provide users with full control of their training code and is carefully designed to work well with JAX transformations such as `grad` and `pmap` (see the [Flax philosophy](https://flax.readthedocs.io/en/latest/philosophy.html)). For an introduction to Flax see the [Flax Basics Colab](https://flax.readthedocs.io/en/latest/notebooks/flax_basics.html) or the list of curated [Flax examples](https://flax.readthedocs.io/en/latest/examples.html). ## Quickstart flax and jax in transformers Currently, we support the following models in Flax. Note that some models are about to be merged to `main` and will be available in a couple of days. - [BART](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bart/modeling_flax_bart.py) - [BERT](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_flax_bert.py) - [BigBird](https://github.com/huggingface/transformers/blob/main/src/transformers/models/big_bird/modeling_flax_big_bird.py) - [CLIP](https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/modeling_flax_clip.py) - [ELECTRA](https://github.com/huggingface/transformers/blob/main/src/transformers/models/electra/modeling_flax_electra.py) - [GPT2](https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_flax_gpt2.py) - [(TODO) MBART](https://github.com/huggingface/transformers/blob/main/src/transformers/models/mbart/modeling_flax_mbart.py) - [RoBERTa](https://github.com/huggingface/transformers/blob/main/src/transformers/models/roberta/modeling_flax_roberta.py) - [T5](https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_flax_t5.py) - [ViT](https://github.com/huggingface/transformers/blob/main/src/transformers/models/vit/modeling_flax_vit.py) - [Wav2Vec2](https://github.com/huggingface/transformers/blob/main/src/transformers/models/wav2vec2/modeling_flax_wav2vec2.py) You can find all available training scripts for JAX/Flax under the official [flax example folder](https://github.com/huggingface/transformers/tree/main/examples/flax). Note that a couple of training scripts will be released in the following week. 
- [Causal language modeling (GPT2)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_clm_flax.py)
- [Masked language modeling (BERT, RoBERTa, ELECTRA, BigBird)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py)
- [Text classification (BERT, RoBERTa, ELECTRA, BigBird)](https://github.com/huggingface/transformers/blob/main/examples/flax/text-classification/run_flax_glue.py)
- [Summarization / Seq2Seq (BART, MBART, T5)](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py)
- [Masked Seq2Seq pre-training (T5)](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_t5_mlm_flax.py)
- [Contrastive Loss pretraining for Wav2Vec2](https://github.com/huggingface/transformers/blob/main/examples/research_projects/jax-projects/wav2vec2)
- [Fine-tuning long-range QA for BigBird](https://github.com/huggingface/transformers/blob/main/examples/research_projects/jax-projects/big_bird)
- [(TODO) Image classification (ViT)]( )
- [(TODO) CLIP pretraining, fine-tuning (CLIP)]( )

### **Flax design philosophy in Transformers**

This section will explain how Flax models are implemented in Transformers and how the design differs from PyTorch.

Let's first go over the difference between Flax and PyTorch.

In JAX, most transformations (notably `jax.jit`) require functions that are transformed to be stateless so that they have no side effects. This is because any such side-effects will only be executed once when the transformed function is run during compilation and all subsequent calls of the compiled function would re-use the same side-effects of the compiled run instead of the "actual" side-effects (see [Stateful Computations in JAX](https://jax.readthedocs.io/en/latest/jax-101/07-state.html)). As a consequence, Flax models, which are designed to work well with JAX transformations, are stateless. This means that when running a model in inference, both the inputs and the model weights are passed to the forward pass. In contrast, PyTorch models are very much stateful with the weights being stored within the model instance and the user just passing the inputs to the forward pass.

Let's illustrate the difference between stateful models in PyTorch and stateless models in Flax.

For simplicity, let's assume the language model consists simply of a single attention layer [`key_proj`, `value_proj`, `query_proj`] and a linear layer `logits_proj` to project the transformed word embeddings to the output logit vectors.

#### **Stateful models in PyTorch**

In PyTorch, the weight matrices would be stored as `torch.nn.Linear` objects alongside the model's config inside the model class `ModelPyTorch`:

```python
class ModelPyTorch:

    def __init__(self, config):
        self.config = config
        self.key_proj = torch.nn.Linear(config)
        self.value_proj = torch.nn.Linear(config)
        self.query_proj = torch.nn.Linear(config)
        self.logits_proj = torch.nn.Linear(config)
```

Instantiating an object `model_pytorch` of the class `ModelPyTorch` would actually allocate memory for the model weights and attach them to the attributes `self.key_proj`, `self.value_proj`, `self.query_proj`, and `self.logits_proj`.
We could access the weights via: ``` key_projection_matrix = model_pytorch.key_proj.weight.data ``` Visually, we would represent an object of `model_pytorch` therefore as follows: ![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_pytorch_def.png) Executing a forward pass then simply corresponds to passing the `input_ids` to the object `model_pytorch`: ```python sequences = model_pytorch(input_ids) ``` In a more abstract way, this can be represented as passing the word embeddings to the model function to get the output logits: ![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_pt_inference.png) This design is called **stateful** because the output logits, the `sequences`, can change even if the word embeddings, the `input_ids`, stay the same. Hence, the function's output does not only depend on its inputs, but also on its **state**, `[self.key_proj, self.value_proj, self.query_proj, self.logits_proj]`, which makes `model_pytorch` stateful. #### **Stateless models in Flax/JAX** Now, let's see how the mathematically equivalent model would be written in JAX/Flax. The model class `ModelFlax` would define the self-attention and logits projection weights as [**`flax.linen.Dense`**](https://flax.readthedocs.io/en/latest/_autosummary/flax.linen.Dense.html#flax.linen.Dense) objects: ```python class ModelFlax: def __init__(self, config): self.config = config self.key_proj = flax.linen.Dense(config) self.value_proj = flax.linen.Dense(config) self.query_proj = flax.linen.Dense(config) self.logits_proj = flax.linen.Dense(config) ``` At first glance the linear layer class `flax.linen.Dense` looks very similar to PyTorch's `torch.nn.Linear` class. However, instantiating an object `model_flax` only defines the linear transformation functions and does **not** allocate memory to store the linear transformation weights. In a way, the attribute `self.key_proj` tell the instantiated object `model_flax` to perform a linear transformation on some input and force it to expect a weight, called `key_proj`, as an input. This time we would illustrate the object `model_flax` without the weight matrices: ![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_flax_def.png) Accordingly, the forward pass requires both `input_ids` as well as a dictionary consisting of the model's weights (called `state` here) to compute the `sequences`: To get the initial `state` we need to explicitly do a forward pass by passing a dummy input: ```python state = model_flax.init(rng, dummy_input_ids) ``` and then we can do the forward pass. ```python sequences = model_flax.apply(state, input_ids) ``` Visually, the forward pass would now be represented as passing all tensors required for the computation to the model's object: ![alt text](https://raw.githubusercontent.com/patrickvonplaten/scientific_images/master/lm_flax_inference.png) This design is called **stateless** because the output logits, the `sequences`, **cannot** change if the word embeddings, the `input_ids`, stay the same. Hence, the function's output only depends on its inputs, being the `input_ids` and the `state` dictionary consisting of the weights **state**, `[key_proj, value_proj, query_proj, logits_proj]`. Another term which is often used to describe the design difference between Flax/JAX and PyTorch is **immutable** vs **mutable**. 
An instantiated Flax model, `model_flax`, is **immutable** as a logical consequence of `model_flax`'s output being fully defined by its input: If calling `model_flax` could mutate `model_flax`, then calling `model_flax` twice with the same inputs could lead to different results which would violate the "*statelessness*" of Flax models.

#### **Flax models in Transformers**

Now let us see how this is handled in `Transformers`. If you have used a Flax model in Transformers already, you might wonder why you don't always have to pass the parameters to the forward pass. This is because the `FlaxPreTrainedModel` class abstracts it away. It is designed this way so that the Flax models in Transformers will have a similar API to PyTorch and Tensorflow models.

The `FlaxPreTrainedModel` is an abstract class that holds a Flax module, handles weight initialization, and provides a simple interface for downloading and loading pre-trained weights, i.e. the `save_pretrained` and `from_pretrained` methods. Each Flax model then defines its own subclass of `FlaxPreTrainedModel`; *e.g.* the BERT model has `FlaxBertPreTrainedModel`. Each such class provides two important methods, `init_weights` and `__call__`. Let's see what each of those methods does:

- The `init_weights` method takes the expected input shape and a [`PRNGKey`](https://jax.readthedocs.io/en/latest/_autosummary/jax.random.PRNGKey.html) (and any other arguments that are required to get initial weights) and calls `module.init` by passing it a random example to get the initial weights with the given `dtype` (e.g. `fp32` or `bf16`). This method is called when we create an instance of the model class, so the weights are already initialized when you create a model, i.e. when you do `model = FlaxBertModel(config)`.
- The `__call__` method defines the forward pass. It takes all necessary model inputs and parameters (and any other arguments required for the forward pass). The parameters are optional; when no parameters are passed, it uses the previously initialized or loaded parameters which can be accessed using `model.params`. It then calls the `module.apply` method, passing it the parameters and inputs to do the actual forward pass. So we can do a forward pass using `output = model(inputs, params=params)`.

Let's look at an example to see how this works. We will write a simple two-layer MLP model.

First, write a Flax module that will declare the layers and computation.

```python
import flax.linen as nn
import jax.numpy as jnp


class FlaxMLPModule(nn.Module):
    # `MLPConfig` is a simple config class (not shown here) holding e.g. `hidden_dim`
    config: MLPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.dense1 = nn.Dense(self.config.hidden_dim, dtype=self.dtype)
        self.dense2 = nn.Dense(self.config.hidden_dim, dtype=self.dtype)

    def __call__(self, inputs):
        hidden_states = self.dense1(inputs)
        hidden_states = nn.relu(hidden_states)
        hidden_states = self.dense2(hidden_states)
        return hidden_states
```

Now let's define the `FlaxPreTrainedModel` model class.
```python
from typing import Tuple

import flax.linen as nn
import jax
import jax.numpy as jnp

from transformers.modeling_flax_utils import FlaxPreTrainedModel


class FlaxMLPPreTrainedModel(FlaxPreTrainedModel):
    config_class = MLPConfig
    base_model_prefix = "model"
    module_class: nn.Module = None

    def __init__(self, config: MLPConfig, input_shape: Tuple = (1, 8), seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs):
        # initialize the flax module
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype)

    def init_weights(self, rng, input_shape):
        # init input tensors
        inputs = jnp.zeros(input_shape, dtype="i4")

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        params = self.module.init(rngs, inputs)["params"]
        return params

    def __call__(self, inputs, params: dict = None):
        params = {"params": params or self.params}
        outputs = self.module.apply(params, jnp.array(inputs))
        return outputs
```

Now we can define our model class as follows.

```python
class FlaxMLPModel(FlaxMLPPreTrainedModel):
    module_class = FlaxMLPModule
```

Now the `FlaxMLPModel` will have a similar interface to PyTorch or Tensorflow models and allows us to attach loaded or randomly initialized weights to the model instance.

So the important point to remember is that the `model` is not an instance of `nn.Module`; it is rather a container that holds a Flax module and its parameters and provides convenient methods for initialization and the forward pass. The key take-away here is that an instance of `FlaxMLPModel` is very much stateful now since it holds all the model parameters, whereas the underlying Flax module `FlaxMLPModule` is still stateless. Now to make `FlaxMLPModel` fully compliant with JAX transformations, it is always possible to pass the parameters to `FlaxMLPModel` as well to make it stateless and easier to work with during training. Feel free to take a look at the code to see how exactly this is implemented, for example in [`modeling_flax_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_flax_bert.py#L536).

Another significant difference between Flax and PyTorch models is that we can pass the `labels` directly to PyTorch's forward pass to compute the loss, whereas Flax models never accept `labels` as an input argument. In PyTorch, gradient backpropagation is performed by simply calling `.backward()` on the computed loss which makes it very handy for the user to be able to pass the `labels`. In Flax however, gradient backpropagation cannot be done by simply calling `.backward()` on the loss output, but the loss function itself has to be transformed by `jax.grad` or `jax.value_and_grad` to return the gradients of all parameters. This transformation cannot happen under-the-hood when one passes the `labels` to Flax's forward function, so in Flax we simply don't allow `labels` to be passed by design and force the user to implement the loss function themselves. As a consequence, you will see that all training-related code is decoupled from the modeling code and always defined in the training scripts themselves.

### **How to use flax models and example scripts**

#### **How to do a forward pass**

Let's first see how to load, save and do inference with Flax models. As explained in the above section, all Flax models in Transformers have a similar API to PyTorch models, so we can use the familiar `from_pretrained` and `save_pretrained` methods to load and save Flax models.
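For instance, a minimal save/re-load round trip might look like the following sketch (the local directory name is just an example):

```python
from transformers import FlaxRobertaModel

# download pre-trained Flax weights from the hub (the same dummy checkpoint as used below)
model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown")

# save the weights and config to a local folder ...
model.save_pretrained("./my-flax-roberta")

# ... and load them back later, just as you would with a PyTorch model
model = FlaxRobertaModel.from_pretrained("./my-flax-roberta")
```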
Let's use the base `FlaxRobertaModel` without any heads as an example.

```python
from transformers import FlaxRobertaModel, RobertaTokenizerFast
import jax

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")
inputs = tokenizer("JAX/Flax is amazing ", padding="max_length", max_length=128, return_tensors="np")

model = FlaxRobertaModel.from_pretrained("julien-c/dummy-unknown")

@jax.jit
def run_model(input_ids, attention_mask):
    # run a forward pass, should return an object `FlaxBaseModelOutputWithPooling`
    return model(input_ids, attention_mask)

outputs = run_model(**inputs)
```

We use `jax.jit` to compile the function to get maximum performance. Note that in the above example, we set `padding="max_length"` to pad all examples to the same length. We do this because JAX's compiler has to recompile a function every time its input shape changes - in a sense a compiled function is not only defined by its code but also by its input and output shape. It is usually much more effective to pad the input to a fixed static shape than to recompile the function multiple times.

#### **How to write a training loop**

Now let's see how we can write a simple training loop to train Flax models. We will use `FlaxGPT2ForCausalLM` as an example.

A training loop for Flax models typically consists of:

- A loss function that takes the parameters and inputs, runs the forward pass and returns the loss.
- A transformation of the loss function with `jax.grad` or `jax.value_and_grad` so that we get the gradients of all parameters.
- An optimizer to update the parameters using the gradients returned by the transformed loss function.
- A train step function which combines the loss function and optimizer update, does the forward and backward pass and returns the updated parameters.

Let's see how that looks in code. First, we initialize our model:

```python
import jax
import jax.numpy as jnp

from transformers import FlaxGPT2ForCausalLM

# assumes a `GPT2Config` instance called `config` has been created beforehand
model = FlaxGPT2ForCausalLM(config)
```

As explained above we don't compute the loss inside the model, but rather in the task-specific training script. For demonstration purposes, we write a pseudo training script for causal language modeling in the following.

```python
from flax.training.common_utils import onehot

def cross_entropy(logits, labels):
    return -jnp.sum(labels * jax.nn.log_softmax(logits, axis=-1), axis=-1)

# define a function which will run the forward pass and return the loss
def compute_loss(params, input_ids, labels):
    logits = model(input_ids, params=params, train=True).logits
    num_classes = logits.shape[-1]
    loss = cross_entropy(logits, onehot(labels, num_classes)).mean()
    return loss
```

Now we transform the loss function with `jax.value_and_grad`.

```python
# transform the loss function to get the gradients
grad_fn = jax.value_and_grad(compute_loss)
```

We use the [optax](https://github.com/deepmind/optax) library to initialize the optimizer.

```python
import optax

params = model.params
tx = optax.sgd(learning_rate=3e-3)
opt_state = tx.init(params)
```

Now we define a single training step which will do a forward and a backward pass.

```python
def _train_step(params, opt_state, input_ids, labels):
    # do the forward pass and get the loss and gradients
    loss, grads = grad_fn(params, input_ids, labels)

    # use the gradients to update parameters
    updates, opt_state = tx.update(grads, opt_state)
    updated_params = optax.apply_updates(params, updates)

    return updated_params, opt_state, loss

train_step = jax.jit(_train_step)
```

Finally, let's run our training loop.
```python
# train loop
for i in range(10):
    params, opt_state, loss = train_step(params, opt_state, input_ids, labels)
```

Note how we always pass the `params` and `opt_state` to `train_step`, which then returns the updated `params` and `opt_state`. This is because of the stateless nature of JAX/Flax models: all state, such as the parameters and the optimizer state, is kept external.

We can now save the model with the trained parameters using:

```python
model.save_pretrained("awesome-flax-model", params=params)
```

Note that, as JAX is backed by the [XLA](https://www.tensorflow.org/xla) compiler, any JAX/Flax code can run on all `XLA`-compliant devices without code changes! That means you could use the same training script on CPUs, GPUs, and TPUs.

To learn more about how to train Flax models on different devices (GPU, multi-GPUs, TPUs) and use the example scripts, please look at the [examples README](https://github.com/huggingface/transformers/tree/main/examples/flax).

## Talks

Three days of talks around JAX / Flax, Transformers, large-scale language modeling and other great topics during our community event!

### Wednesday, June 30th
- [Watch the talks on YouTube](https://www.youtube.com/watch?v=fuAyUQcVzTY)
- [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit?usp=sharing)

 Speaker        | Topic                           | Time                  | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Skye Wanderman-Milne, Google Brain | Intro to JAX on Cloud TPUs | 6.00pm-6.45pm CEST / 9.00am-9.45am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=fuAyUQcVzTY) |
| Marc van Zee, Google Brain | Introduction to Flax | 6.45pm-7.30pm CEST / 9.45am-10.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/fuAyUQcVzTY?t=2569) |
| Pablo Castro, Google Brain | Using Jax & Flax for RL with the Dopamine library | 7.30pm-8.00pm CEST / 10.30am-11.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/fuAyUQcVzTY?t=5306) |

### Thursday, July 1st
- [Watch the talks on YouTube](https://www.youtube.com/watch?v=__eG63ZP_5g)
- [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit#gid=1515796400)

 Speaker        | Topic                           | Time                  | Video |
|-------------|---------------------------------|------------------------|------------------------|
| Suraj Patil & Patrick von Platen, Hugging Face | How to use JAX/Flax with Transformers | 5.30pm-6.00pm CEST / 8.30am-9.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=__eG63ZP_5g) |
| Sabrina J.
Mielke, Johns Hopkins University & HuggingFace | From stateful code to purified JAX: how to build your neural net framework | 6.00pm-6.30pm CEST / 9.00am-9.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=1576) | | Mostafa Dehghani, Google Brain | Long Range Arena: Benchmarking Efficient Transformers | 6.30pm-7.00pm CEST / 9.30am-10.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=3695) | | Rohan Anil, Google Brain | Scalable Second Order Optimization for Deep Learning | 7.00pm-7.30pm CEST / 10.00am-10.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/__eG63ZP_5g?t=5285) | ### Friday, July 2nd - [Watch the talks on YouTube](https://www.youtube.com/watch?v=ZCMOPkcTu3s) - [Chat history](https://docs.google.com/spreadsheets/d/1PZ5xYV2hVwlAVQSqDag65ympv5YNCSDmXyG-eWTaZ_o/edit#gid=1166061401) Speaker | Topic | Time | Video | |-------------|---------------------------------|------------------------|------------------------| | Lucas Beyer, Google Brain | Vision Transformer | 5.00pm-5.30 CEST / 8.00am-8.30 PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://www.youtube.com/watch?v=ZCMOPkcTu3s) | | Ben Wang, EleutherAI | Multihost Training in Mesh Transformer JAX | 5.30pm-6.00 CEST / 8.30am-9.00 PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=1803) | | Iurii Kemaev, Soňa Mokrá, Junhyuk Oh, DeepMind | DeepMind JAX Ecosystem | 6.00pm-6.30 CEST / 9.00am-9.30am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=3388) | | Siddhartha Kamalakara, Joanna Yoo & João G M Araújo, Cohere | Training large scale language models | 6:30pm-7.00pm CEST / 9:30am-10.00am PST | [![Youtube](https://www.youtube.com/s/desktop/f506bd45/img/favicon_32.png)](https://youtu.be/ZCMOPkcTu3s?t=5095) | ### Talks & Speakers #### Skye Wanderman-Milne, JAX developer, Google Brain - Talk: Intro to JAX on Cloud TPUs - Abstract: JAX is a system for high-performance machine-learning research that combines the familiarity of Python + NumPy together with the power of hardware acceleration on CPUs, GPUs, and TPUs. It offers composable function transformations for automatic differentiation, automatic batching, end-to-end compilation, and both data and model parallelism. This talk will show you how to get up and running with JAX on a Cloud TPU VM. - Speaker info: Skye Wanderman-Milne is a software engineer working on JAX. She has previously worked on TensorFlow and Apache Impala, a high-performance distributed database. #### Marc van Zee, Research SWE, Google Brain (Flax team) - Talk: Introduction to Flax - Abstract: In this talk I will provide a high-level introduction to the neural network library Flax. I will discuss the Flax philosophy, talk about the ecosystem around Flax and provide a high-level introduction to the code. I explain the Module abstraction and how to use it to train your models. - Speaker info: Marc is at Google Research for over 4 years. First he worked on conceptual AI, developing a next generation language understanding and reasoning prototype and he authored the CFQ dataset for compositional generalization. Currently, Marc works as a research software engineer in the Flax team. 
#### Pablo Castro, Staff Research Software Developer; Google Research, Brain Team - Talk: Using Jax & Flax for RL with the Dopamine library - Abstract: The Dopamine library was launched with TensorFlow in 2018 and we added a Jax/Flax variant of it last year. Internally, Jax's flexibility has facilitated our RL research tremendously, and we are excited to demonstrate its potential. - Speaker info: Pablo Samuel has been at Google for over 9 years, and is currently a researcher with the Brain team, focusing on fundamental reinforcement learning, as well as machine learning and creativity. Aside from his research, Pablo Samuel is an active musician (with a channel exploring the intersection of music and computer science), and is helping increase the representation of the LatinX community in the research world. - Dopamine repo: https://github.com/google/dopamine - Homepage: https://psc-g.github.io/ - Twitter: https://twitter.com/pcastr #### Suraj Patil & Patrick von Platen, Machine Learning Engineers at Hugging Face - Talk: How to use JAX/Flax with Transformers - Abstract: Transformers is one of the most popular open-source ML libraries and supports PyTorch, Tensorflow, and JAX/Flax. In this talk, we will explain how JAX/Flax models should be used in Transformers and compare their design in Transformers with the design of PyTorch models in Transformers. In the second part, we will give you a hands-on presentation of how a model can be trained end-to-end with the official JAX/Flax example scripts using Transformers & Datasets. Along the way, we want to give you some tips and tricks on how to best realize your project. - Speaker info: Suraj and Patrick are part of Hugging Face’s open source team and lead the integration of JAX/Flax into Transformers. - GitHub: https://github.com/patil-suraj & https://github.com/patrickvonplaten #### Sabrina J. Mielke, PhD student at The Johns Hopkins University & Part-time research intern at HuggingFace - Talk: From stateful code to purified JAX: how to build your neural net framework - Abstract: Moving from object-oriented (and stateful) PyTorch- or TF2-code with tape-based backprop to JAX isn't easy---and while running grad() on numpy-oneliners is cool and all, you do wonder... how do I build actual big neural nets? Libraries like flax, trax, or haiku make it easy---but how could you build machinery like that yourself? - Speaker info: Sabrina is a PhD student at the Johns Hopkins University and a part-time research intern at HuggingFace, researching open-vocabulary language models for segmentation and tokenization. She has published and co-organized workshops and shared tasks on these topics as well as on morphology and typological analysis in ACL, NAACL, EMNLP, LREC, and AAAI. You can find her reminisce for a time when formal language theory played a bigger role in NLP on Twitter at @sjmielke. - Links: The 2020 blogpost this talk will be based on: https://sjmielke.com/jax-purify.htm, leading to our experiment Parallax and eventually Haiku #### Mostafa Dehghani, Research Scientist, Google Brain - Talk: Long Range Arena: Benchmarking Efficient Transformers - Abstract: Transformers do not scale very well to long sequence lengths largely because of quadratic self-attention complexity. In the recent months, a wide spectrum of efficient, fast Transformers have been proposed to tackle this problem, more often than not claiming superior or comparable model quality to vanilla Transformer models. 
So, we now need a well-established consensus on how to evaluate this class of models. Moreover, inconsistent benchmarking on a wide spectrum of tasks and datasets makes it difficult to assess relative model quality amongst many models. I'll talk about a systematic and unified benchmark, LRA, specifically focused on evaluating model quality under long-context scenarios. LRA is a suite of tasks consisting of sequences ranging from 1K to 16K tokens, encompassing a wide range of data types and modalities such as text, natural, synthetic images, and mathematical expressions requiring similarity, structural, and visual-spatial reasoning. We systematically evaluate ten well-established long-range Transformer models (Reformers, Linformers, Linear Transformers, Sinkhorn Transformers, Performers, Synthesizers, Sparse Transformers, and Longformers) on LRA. LRA paves the way towards better understanding this class of efficient Transformer models, facilitates more research in this direction, and presents new challenging tasks to tackle. - Speaker info: https://mostafadehghani.com/ #### Rohan Anil, Senior Staff Software Engineer, Google Research, Brain Team - Talk: Scalable Second Order Optimization for Deep Learning - Abstract: Optimization in machine learning, both theoretical and applied, is presently dominated by first-order gradient methods such as stochastic gradient descent. Second-order optimization methods, that involve second derivatives and/or second order statistics of the data, are far less prevalent despite strong theoretical properties, due to their prohibitive computation, memory and communication costs. In an attempt to bridge this gap between theoretical and practical optimization, we present a scalable implementation of a second-order preconditioned method (concretely, a variant of full-matrix Adagrad), that along with several critical algorithmic and numerical improvements, provides significant convergence and wall-clock time improvements compared to conventional first-order methods on state-of-the-art deep models. Our novel design effectively utilizes the prevalent heterogeneous hardware architecture for training deep models, consisting of a multicore CPU coupled with multiple accelerator units. We demonstrate superior performance compared to state-of-the-art on very large learning tasks such as machine translation with Transformers, language modeling with BERT, click-through rate prediction on Criteo, and image classification on ImageNet with ResNet-50. - Speaker info: Rohan Anil is a software engineer at Google Research, Mountain View. Lately, he has been working on scalable and practical optimization techniques for efficient training of neural networks in various regimes. - Resources: - https://arxiv.org/abs/2002.09018 - https://arxiv.org/abs/1901.11150 - https://arxiv.org/abs/2106.06199 #### Lucas Beyer, Senior Research Engineer, Google Brain - Talk: Vision Transformer - Abstract: This talk will discuss the learning of general visual representations via large-scale pre-training and few-shot transfer, with a special focus on the Vision Transformer (ViT) architecture, which popularized transformers for the visual domain. - Speaker info: Lucas Beyer is a self-taught hacker and studied engineer. 
He went on to do his PhD in robotic perception at RWTH Aachen and is currently on a quest to find the ultimate visual representation at Google Brain in Zürich #### Ben Wang, Independent AI Researcher, EleutherAI - Talk: Multihost Training in Mesh Transformer JAX - Abstract: As models become larger, training must be scaled across multiple nodes. This talk discusses some design decisions and tradeoffs made for scaling to multiple nodes in Mesh Transformer JAX, a library for running model parallel transformers on TPU pods. - Speaker info: Ben is an independent AI researcher who contributes to EleutherAI, an open source research collective centered around democratizing access to powerful AI models. Recently he has released GPT-J-6B, a 6 billion parameter transformer which is the most powerful autoregressive language model in terms of zero-shot performance with public weights. - Website: https://www.eleuther.ai/ #### Iurii Kemaev, Research Engineer, Soňa Mokrá, Research Engineer, and Junhyuk Oh, Research Scientist, DeepMind - Talk: DeepMind JAX Ecosystem - Abstract: The DeepMind JAX Ecosystem is an effort to build a shared substrate of components to enable all aspects of AGI Research. In this talk, our researchers and engineers will give a high-level overview of our Ecosystem goals and design philosophies, using our Haiku (neural network), Optax (optimization) and RLax (reinforcement learning) libraries as examples. We will then deep dive on two examples of recent DeepMind research that have been enabled by JAX and these libraries: generative models and meta-gradient reinforcement learning. - Speaker info: - Iurii Kemaev is a Research Engineer at DeepMind. He has been using JAX for 2 years advancing RL research. Iurii is one of the DM JAX ecosystem leads. - Soňa Mokrá is a Research Engineer at DeepMind. She has a background in machine translation and has been using JAX as the main ML framework for the past 6 months. - Junhyuk Oh is a Research Scientist at DeepMind, working on reinforcement learning and meta-learning. More information is available at https://junhyuk.com/ #### Siddhartha Kamalakara, Joanna Yoo, João G M Araújo, MLE at Cohere - Talk: Training large scale language models - Abstract: A journey through Cohere’s experiences with training large scale language models. Join us in our exploration of pipeline and model parallelism as strategies for efficient training of large language models. We will present and motivate our recent transition to JAX+Flax as our choice of internal tech stack. - Speaker info: - João G M Araújo is a Brazilian college student with a passion for mathematics and a fascination for Deep Learning. João conducted research on representation learning and spent 3 months in Japan working on NeuroEvolution. João likes reading fantasy books and spending quality time with family and friends, and also runs a YouTube series on theoretical understanding of Deep Learning where researchers talk about their findings - Joanna Yoo is one of the founding engineers at Cohere, working on scaling language models for the last year and half. Joanna loves live concerts and rock climbing! - Siddhartha Rao Kamalakara is an MLE at Cohere and a researcher at FOR.ai with research interests at the intersection of efficient training and empirical understanding of DL. - Website: https://cohere.ai/ ## How to use the hub for collaboration In this section, we will explain how a team can use the 🤗 hub to collaborate on a project. 
The 🤗 hub allows each team to create a repository with integrated git version control that should be used for their project. The advantages of using a repository on the 🤗 hub are: - easy collaboration - each team member has write access to the model repository - integrated git version control - code scripts as well as large model files are tracked using git version control - easy sharing - the hub allows each team to easily share their work during and after the event - integrated tensorboard functionality - uploaded tensorboard traces are automatically displayed on an integrated tensorboard tab We highly recommend that each team make use of the 🤗 hub during the event. To better understand how the repository and the hub in general function, please take a look at the documentation and the videos [here](https://huggingface.co/docs/hub). Now let's explain in more detail how a project can be created on the hub. If you have an officially defined project on [this](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing) Google Sheet, you should be part of [the Flax Community organization on the hub](https://huggingface.co/flax-community). All repositories should be created under this organization so that write access can be shared and everybody can easily access other participants' work 🤗. Note that we are giving each team member access to all repositories created under [flax-community](https://huggingface.co/flax-community), but we encourage participants to only clone and edit repositories corresponding to their own team. If you want to help other teams, please ask them before changing files in their repository! The integrated git version control keeps track of all changes, so in case a file was deleted by mistake, it is trivial to re-create it. Awesome! Now, let's go over a simple example in which we'll pre-train a RoBERTa model on a low-resource language. To begin with, we create a repository under [the Flax Community organization on the hub](https://huggingface.co/flax-community) by logging in to the hub and going to [*"Add model"*](https://huggingface.co/new). By default the username should be displayed under "*Owner*", which we want to change to *flax-community*. Next, we give our repository a fitting name for the project - here we'll just call it *roberta-base-als* because we'll be pretraining a RoBERTa model on the super low-resource language *Alemannic* (`als`). We make sure that the model is a public repository and create it! It should then be displayed on [the Flax Community organization on the hub](https://huggingface.co/flax-community). Great, now we have a project directory with integrated git version control and a public model page, which we can access under [flax-community/roberta-base-als](https://huggingface.co/flax-community/roberta-base-als). Let's create a short README so that other participants know what this model is about. You can create the README.md directly on the model page as a markdown file. Let's now make use of the repository for training. We assume that the 🤗 Transformers library and [git-lfs](https://git-lfs.github.com/) are correctly installed on our machine or the TPU attributed to us. If this is not the case, please refer to the [Installation guide](#how-to-install-relevant-libraries) and the official [git-lfs](https://git-lfs.github.com/) website.
At first we should log in: ```bash $ huggingface-cli login ``` Next we can clone the repo: ```bash $ git clone https://huggingface.co/flax-community/roberta-base-als ``` We have now cloned the model's repository and it should be under `roberta-base-als`. As you can see, we have all the usual git functionalities in this repo - when adding a file, we can do `git add .`, `git commit -m "add file"` and `git push` as usual. Let's try it out by adding the model's config. We go into the folder: ```bash $ cd ./roberta-base-als ``` and run the following commands in a Python shell to save a config. ```python from transformers import RobertaConfig config = RobertaConfig.from_pretrained("roberta-base") config.save_pretrained("./") ``` Now we've added a `config.json` file and can upload it by running ```bash $ git add . && git commit -m "add config" && git push ``` Cool! The file is now displayed on the model page under the [files tab](https://huggingface.co/flax-community/roberta-base-als/tree/main). We encourage you to upload all files except maybe the actual data files to the repository. This includes training scripts, model weights, model configurations, training logs, etc... Next, let's create a tokenizer and save it to the model dir by following the instructions of the [official Flax MLM README](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#train-tokenizer). We can again use a simple Python shell. ```python from datasets import load_dataset from tokenizers import ByteLevelBPETokenizer # load dataset dataset = load_dataset("oscar", "unshuffled_deduplicated_als", split="train") # Instantiate tokenizer tokenizer = ByteLevelBPETokenizer() def batch_iterator(batch_size=1000): for i in range(0, len(dataset), batch_size): yield dataset[i: i + batch_size]["text"] # Customized training tokenizer.train_from_iterator(batch_iterator(), vocab_size=50265, min_frequency=2, special_tokens=[ "<s>", "<pad>", "</s>", "<unk>", "<mask>", ]) # Save files to disk tokenizer.save("./tokenizer.json") ``` This creates and saves our tokenizer directly in the cloned repository. Finally, we can start training. For now, we'll simply use the official [`run_mlm_flax`](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py) script, but we might make some changes later. So let's copy the script into our model repository. ```bash $ cp ~/transformers/examples/flax/language-modeling/run_mlm_flax.py ./ ``` This way we are certain to have all the code used to train the model tracked in our repository. Let's start training by running: ```bash ./run_mlm_flax.py \ --output_dir="./" \ --model_type="roberta" \ --config_name="./" \ --tokenizer_name="./" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_als" \ --max_seq_length="128" \ --per_device_train_batch_size="4" \ --per_device_eval_batch_size="4" \ --learning_rate="3e-4" \ --warmup_steps="1000" \ --overwrite_output_dir \ --num_train_epochs="8" \ --push_to_hub ``` Since the dataset is tiny this command should actually run in less than 5 minutes. Note that we attach the flag ``--push_to_hub`` so that both model weights and tensorboard traces are automatically uploaded to the hub. You can see the tensorboard directly on the model page, under the [Training metrics tab](https://huggingface.co/flax-community/roberta-base-als/tensorboard). As you can see, it is pretty simple to upload model weights and training logs to the model hub. 
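As a quick sanity check once the run has finished, you can load the pushed checkpoint straight back from the hub and query it for a masked token. The snippet below is only a minimal sketch: it assumes that the `flax-community/roberta-base-als` repository already contains the trained `flax_model.msgpack` together with the tokenizer files, and the Alemannic example sentence is made up for illustration (after such a short training run the predictions will of course still be rough).

```python
import jax.numpy as jnp
from transformers import AutoTokenizer, FlaxRobertaForMaskedLM

repo_id = "flax-community/roberta-base-als"  # assumed to contain the trained weights
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = FlaxRobertaForMaskedLM.from_pretrained(repo_id)

# Ask the model to fill in the masked token of a toy sentence
inputs = tokenizer("Das isch e <mask> Satz.", return_tensors="np")
logits = model(**inputs).logits

mask_position = inputs["input_ids"][0].tolist().index(tokenizer.mask_token_id)
predicted_token_id = int(jnp.argmax(logits[0, mask_position]))
print(tokenizer.decode([predicted_token_id]))
```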
Since the repository has git version control, you & your team probably already have the necessary skills to collaborate. Thanks to `git-lfs` being integrated into the hub, model weights and other larger files can just as easily be uploaded and changed. Finally, at Hugging Face, we believe that the model hub is a great platform to share your project while you are still working on it: - Bugs in training scripts can be found and corrected by anybody participating in the event - Loss curves can be analyzed directly on the model page - Model weights can be accessed and analyzed by everybody from the model repository If you are not using a transformers model, don't worry - you should still be able to make use of the hub's functionalities! The [huggingface_hub](https://github.com/huggingface/huggingface_hub) library allows you to upload essentially any JAX/Flax model to the hub with just a couple of lines of code. *E.g.* assuming you want to call your model simply `flax-model-dummy`, you can upload it to the hub with just a few lines of code: ```python from flax import serialization from jax import random from flax import linen as nn from huggingface_hub import Repository model = nn.Dense(features=5) key1, key2 = random.split(random.PRNGKey(0)) x = random.normal(key1, (10,)) params = model.init(key2, x) bytes_output = serialization.to_bytes(params) repo = Repository("flax-model", clone_from="flax-community/flax-model-dummy", use_auth_token=True) with repo.commit("My cool Flax model :)"): with open("flax_model.msgpack", "wb") as f: f.write(bytes_output) # Repo is created and available here: https://huggingface.co/flax-community/flax-model-dummy ``` **Note**: Make sure to have `huggingface_hub >= 0.0.13` to make this command work. For more information, check out [this PR](https://github.com/huggingface/huggingface_hub/pull/143) on how to upload any framework to the hub. ## How to setup TPU VM In this section we will explain how you can ssh into a TPU VM that has been given to your team. If your username is in one of the officially defined projects [here](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing), you should have received two emails: - one that states that you have been granted the role "Community Week Participants" for the project hf-flax, and - one (or more if you are in multiple projects) that gives you the TPU name and the TPU zone for the TPU of your team You should click on "Open Cloud Console" in the first email and agree to the pop-up windows that follow. It will allow you to use a TPU VM. Don't worry if you cannot access the actual project `hf-flax` visually on the Google Cloud console and receive an error: ``` You don't have sufficient permission to view this page ``` - this is expected! Great, now you and your team can access your TPU VM! In the following, we will describe how to do so using a standard console, but you should also be able to connect to the TPU VM via IDEs, like Visual Studio Code, etc. 1. You need to install the Google Cloud SDK. Please follow the instructions on [cloud.google.com/sdk](https://cloud.google.com/sdk/docs/install#linux). 2. Once you've installed the Google Cloud SDK, you should set your account by running the following command. Make sure that `<your-email-address>` corresponds to the Gmail address you used to sign up for this event. ```bash $ gcloud config set account <your-email-address> ``` 3.
Let's also make sure the correct project is set in case your email is used for multiple gcloud projects: ```bash $ gcloud config set project hf-flax ``` 4. Next, you will need to authenticate yourself. You can do so by running: ```bash $ gcloud auth login ``` This should give you a link to a website, where you can authenticate your Gmail account. 5. Finally, you can ssh into the TPU VM! Please run the following command by setting <zone> to either `europe-west4-a` or `us-central1-a` (depending on what is stated in the second email you received) and <tpu-name> to the TPU name also sent to you in the second email. ```bash $ gcloud alpha compute tpus tpu-vm ssh <tpu-name> --zone <zone> --project hf-flax ``` This should ssh you into the TPU VM! Now you can follow the steps of the section [How to install relevant libraries](#how-to-install-relevant-libraries) to install all necessary libraries. Make sure to carefully follow the explanations of the "**IMPORTANT**" statement to correctly install JAX on TPU. Also feel free to install other `python` or `apt` packages on your machine if it helps you to work more efficiently! ## How to build a demo ### Using the Hugging Face Widgets Hugging Face has over [15 widgets](https://huggingface-widgets.netlify.app/) for different use cases using the 🤗 Transformers library. Some of them also support [3rd party libraries](https://huggingface.co/docs/hub/libraries) such as [Sentence Similarity](https://huggingface.co/sentence-transformers/paraphrase-xlm-r-multilingual-v1) with Sentence Transformers and [Text to Speech](https://huggingface.co/julien-c/ljspeech_tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space_train) with [ESPnet](https://github.com/espnet/espnet). All the widgets are open-sourced in the `huggingface_hub` [repo](https://github.com/huggingface/huggingface_hub/tree/main/widgets). Here is a summary of existing widgets: **NLP** * **Conversational:** To have the best conversations! [Example](https://huggingface.co/microsoft/DialoGPT-large?). * **Feature Extraction:** Retrieve the input embeddings. [Example](https://huggingface.co/sentence-transformers/distilbert-base-nli-mean-tokens?text=test). * **Fill Mask:** Predict potential words for a mask token. [Example](https://huggingface.co/bert-base-uncased?). * **Question Answering:** Given a context and a question, predict the answer. [Example](https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad). * **Sentence Similarity:** Predict how similar a set of sentences are. Useful for Sentence Transformers. * **Summarization:** Given a text, output a summary of it. [Example](https://huggingface.co/sshleifer/distilbart-cnn-12-6). * **Table Question Answering:** Given a table and a question, predict the answer. [Example](https://huggingface.co/google/tapas-base-finetuned-wtq). * **Text Generation:** Generate text based on a prompt. [Example](https://huggingface.co/gpt2). * **Token Classification:** Useful for tasks such as Named Entity Recognition and Part of Speech. [Example](https://huggingface.co/dslim/bert-base-NER). * **Zero-Shot Classification:** Too cool to explain with words. Here is an [example](https://huggingface.co/typeform/distilbert-base-uncased-mnli). * ([WIP](https://github.com/huggingface/huggingface_hub/issues/99)) **Table to Text Generation**. **Speech** * **Audio to Audio:** For tasks such as audio source separation or speech enhancement. * **Automatic Speech Recognition:** Convert audio to text.
[Example](https://huggingface.co/facebook/wav2vec2-base-960h) * **Text to Speech**: Convert text to audio. **Image** * **Image Classification:** Given an image, predict its class. [Example](https://huggingface.co/osanseviero/llamastic). * ([WIP](https://github.com/huggingface/huggingface_hub/issues/100)) **Zero Shot Image Classification** * ([WIP](https://github.com/huggingface/huggingface_hub/issues/112)) **Image Captioning** * ([WIP](https://github.com/huggingface/huggingface_hub/issues/113)) **Text to Image Generation** * ([Proposed](https://github.com/huggingface/huggingface_hub/issues/127)) **Visual Question Answering** You can propose and implement new widgets by [opening an issue](https://github.com/huggingface/huggingface_hub/issues). Contributions are welcome! ### Using a Streamlit demo Sometimes you might be using different libraries or a very specific application that is not well supported by the current widgets. In this case, [Streamlit](https://streamlit.io/) can be an excellent option to build a cool visual demo. Setting up a Streamlit application is straightforward and done entirely in Python! A common use case is loading files from your model repository on the Hub inside the Streamlit demo. The `huggingface_hub` library is here to help you! ```bash pip install huggingface_hub ``` Here is an example downloading (and caching!) a specific file directly from the Hub: ```python from huggingface_hub import hf_hub_download filepath = hf_hub_download("flax-community/roberta-base-als", "flax_model.msgpack") ``` In many cases you will want to download the full repository. Here is an example downloading all the files from a repo. You can even specify specific revisions! ```python from huggingface_hub import snapshot_download local_path = snapshot_download("flax-community/roberta-base-als") ``` Note that if you're using the 🤗 Transformers library, you can quickly load the model and tokenizer as follows: ```python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("REPO_ID") model = AutoModelForMaskedLM.from_pretrained("REPO_ID") ``` We'll provide more examples on Streamlit demos next week. Stay tuned! ### Using a Gradio demo You can also use [Gradio](https://gradio.app/) to share your demos! [Here](https://huggingface.co/blog/gradio) is an example using the Gradio library to create a GUI for a Hugging Face model. More to come! ## Project evaluation For your project to be evaluated, please fill out [this Google form](https://forms.gle/jQaMkj3JJdD4Xcwn9). Please make sure that your submitted project includes a demo as well as information about the model, data, training methods, etc. ### Criteria * **Demo.** All projects are required to have a demo. It’s open-ended, but we provide some ideas on how to build demos in the [How to build a demo](#how-to-build-a-demo) section. * **Technical difficulty.** Difficulty has different aspects, such as working with complex architectures, obtaining better evaluation metrics than existing models, or implementing models for low-resource languages. * **Social impact.** The project is expected to have a positive social impact, e.g. by tackling an under-explored area of practical interest for minorities or under-represented groups (low-resource languages, a specific focus on bias, fairness or ethical issues in ML) or by tackling general societal challenges, e.g. health- or climate-related challenges. * **Innovativeness.** Projects that propose novel applications or bring new ideas will be rewarded more.
### Jury * [Niki Parmar](https://research.google/people/NikiParmar/): Staff Research Scientist at Google. * [Ross Wightman](https://www.linkedin.com/in/wightmanr/): Angel Investor. * [Thomas Wolf](https://www.linkedin.com/in/thomas-wolf-a056857/): Co-founder and CSO at Hugging Face. * [Ashish Vaswani](https://research.google/people/AshishVaswani/): Staff Research Scientist at Google Brain. ### Process * **July 17, 12h00 CEST**: TPU VM access closes. * **July 19, 12h00 CEST**: Project completion deadline (including demo). * **July 19-21**: A group of event organizers (Suraj, Patrick, Suzana, and Omar) will do an initial filter to find the top 15 projects. * **July 22-26**: The jury will go over the 15 projects and pick the top three projects out of them. * **July 27**: Winning projects are announced. ## General tips and tricks TODO (will be filled continuously)... ## FAQ TODO (will be filled continuously)...
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/jax-projects/HOW_TO_PROPOSE_PROJECT.md
# How to propose a Flax/JAX + Transformers project Great that you've opened this document! While we at 🤗 are proposing a couple of projects, we strongly believe that the community can come up with much more **creative**, **fun**, and **impactful** projects on their own. This being said, we are really looking forward to seeing your project proposal! ## What a project should be about The proposed project should fall into the machine learning fields of **Natural Language Processing (NLP)** and/or **Computer Vision (CV)** (possibly also **Speech Recognition (ASR)** depending on whether Speech Recognition models are available in Flax in due time) and aim at solving a specific task. Possible tasks can belong to: * text classification * text generation * image recognition * image processing * image captioning * audio classification * and other tasks you can think of! The clearer a task is defined, the better your project proposal is. *E.g.* "Using a T5 model to learn grammar correction in French" or "Adapting a pre-trained CLIP model for zero-shot image classification in Spanish" are **well-defined and clear** project proposals, while something like "Train a language model" or "Image classification" are **too vague**. There is no limit to your creativity as long as the project is feasible and ethical. The more creative & specific your project proposal, the more interesting it will be, and the more likely will you find motivated team members to work on your project! To get an idea of how to formulate your project proposals, you can browse through existing project proposals on the [forum](https://discuss.huggingface.co/c/flax-jax-projects/22). ## How to submit a project proposal First, you should make sure that you are [logged in](https://huggingface.co/login?sso=bm9uY2U9OTRlNjZjZmZhYjMwMmJmMWMyYjc5MmFiMTMyMzY5ODYmcmV0dXJuX3Nzb191cmw9aHR0cHMlM0ElMkYlMkZkaXNjdXNzLmh1Z2dpbmdmYWNlLmNvJTJGc2Vzc2lvbiUyRnNzb19sb2dpbg%3D%3D&sig=429ad8924bcb33c40f9823027ea749abb55d393f4f58924f36a2dba3ab0a48da) with your Hugging Face account on the forum. Second, make sure that your project idea doesn't already exist by checking [existing projects](https://discuss.huggingface.co/c/flax-jax-projects/22). If your project already exists - great! This means that you can comment and improve the existing idea and join the project to form a team! If your project idea already exists for a different language, feel free to submit the same project idea, just in a different language. Third, having ensured that your project doesn't exist, click on the *"New Topic"* button on the [Flax/JAX Projects Forum category](https://discuss.huggingface.co/c/flax-jax-projects/22) to create a new project proposal. Fourth, make sure that your project proposal includes the following information: 1. *A clear description of the project* 2. *In which language should the project be conducted?* English, German, Chinese, ...? It can also be a multi-lingual project 3. *Which model should be used?* If you want to adapt an existing model, you can add the link to one of the 4000 available checkpoints in JAX [here](https://huggingface.co/models?filter=jax) If you want to train a model from scratch, you can simply state the model architecture to be used, *e.g.* BERT, CLIP, etc. You can also base your project on a model that is not part of transformers. For an overview of libraries based on JAX, you can take a look at [awesome-jax](https://github.com/n2cholas/awesome-jax#awesome-jax-). 
**Note** that for a project that is not based on Transformers it will be more difficult for the 🤗 team to help you. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what model architectures are currently supported in 🤗 Transformers. 4. *What data should be used?* It is important to state at least what kind of data you would like to use. Ideally, you can already point to publicly available data or a dataset in the 🤗 Datasets library. 5. *Are similar training scripts available in Flax/JAX?* It would be important to find similar training scripts that already exist in Flax/JAX. *E.g.* if you are working on a Seq-to-Seq task, you can make use of the [`run_summarization_flax.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/summarization/run_summarization_flax.py) script which is very similar to any seq2seq training. Also have a look at the section [Quickstart Flax & Jax in Transformers](https://github.com/huggingface/transformers/tree/main/examples/research_projects/jax-projects#quickstart-flax-and-jax-in-transformers) to see what training scripts are currently supported in 🤗 Transformers. 6. *(Optionally) What are possible challenges?* List possible difficulties with your project. *E.g.* If you know that training convergence usually takes a lot of time, it is worth stating this here! 7. *(Optionally) What is the desired project outcome?* - How would you like to demo your project? One could *e.g.* create a Streamlit application. 8. *(Optionally) Links to read upon* - Can you provide any links that would help the reader to better understand your project idea? Feel free to copy-paste the following format for your project proposal and fill out the respective sections: ``` # <FILL ME: Name of project> <FILL ME: A clear description of the project> ## 2. Language The model will be trained in <FILL ME: which language?>. ## 3. Model <FILL ME: 3. Which model should be used?> ## 4. Datasets <FILL ME: 4. Which data should be used?> Possible links to publicly available datasets include: - <FILL ME: Link 1 to dataset> - <FILL ME: Link 2 to dataset> - <FILL ME: Link 3 to dataset> ## 5. Training scripts <FILL ME: 5. Are there publicly available training scripts that can be used/tweaked for the project?> We can make use of <FILL ME: link to training script> to train the model.> ## 6. (Optional) Challenges <(Optionally) FILL ME: 6. What are possible challenges?> ## 7. (Optional) Desired project outcome <(Optionally) FILL ME: 7. What is the desired project outcome? A demo?> ## 8. (Optional) Reads The following links can be useful to better understand the project and what has previously been done. - <FILL ME: Link 1 to read> - <FILL ME: Link 2 to read> - <FILL ME: Link 3 to read> ``` To see how a proposed project looks like, please have a look at submitted project proposals [here](https://discuss.huggingface.co/c/flax-jax-projects/22). ## Will my project proposal be selected? Having submitted a project proposal, you can now promote your idea in the Slack channel `#flax-jax-community-week` to try to convince other participants to join your project! 
Once other people have joined your project, one of the organizers (`@Suzana, @valhalla, @osanseviero, @patrickvonplaten`) will officially create a team for your project and add your project to [this google sheet](https://docs.google.com/spreadsheets/d/1GpHebL7qrwJOc9olTpIPgjf8vOS0jNb6zR_B8x_Jtik/edit?usp=sharing).
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/requirements.txt
git+https://github.com/huggingface/transformers@main datasets sentencepiece wandb flax jsonlines
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/README.md
Author: [@vasudevgupta7](https://github.com/thevasudevgupta/) ## Intro In this project, we fine-tuned [**BigBird**](https://arxiv.org/abs/2007.14062) on the [**natural-questions**](https://huggingface.co/datasets/natural_questions) dataset for the **question-answering** task on long documents. **BigBird** is a **sparse-attention-based transformer** which extends Transformer-based models, such as BERT, to much **longer sequences**. Read more about BigBird at https://huggingface.co/blog/big-bird ## Fine-tuning **Setup** You need to install JAX yourself by following the official docs ([see this](https://github.com/google/jax#installation)). Other requirements for this project can be installed by running the following command: ```shell pip3 install -qr requirements.txt ``` **Download & prepare dataset** The Natural Questions corpus contains questions from real users, and it requires QA systems to read and comprehend an entire Wikipedia article that may or may not contain the answer to the question. This corpus takes ~100 GB on disk. We have used HuggingFace Datasets to download & process the dataset. ```shell # just run the following command python3 prepare_natural_questions.py # this will download the whole dataset from HuggingFace Hub & will make it ready for training # this script takes ~3 hours to process the dataset ``` **Launch Training** We trained on a Cloud TPU v3-8. Each epoch took around 4.5 hours and the model converged in just 2 epochs. You can see the complete training arguments in [this script](bigbird_flax.py). ```shell # just run the following command python3 train.py # in case you want to try hyperparameter tuning, you can run a wandb sweep wandb sweep --project=bigbird sweep_flax.yaml wandb agent <agent-id-obtained-by-above-CMD> ``` ## Evaluation Our evaluation script is different from the original script and we are evaluating sequences with length up to 4096 tokens for simplicity. We managed to get an **EM score of ~55.2** using our evaluation script. ```shell # download validation-dataset first mkdir natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/natural_questions-validation.arrow -P natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/dataset_info.json -P natural-questions-validation wget https://huggingface.co/datasets/vasudevgupta/natural-questions-validation/resolve/main/state.json -P natural-questions-validation # simply run the following command python3 evaluate.py ``` You can find our checkpoint on the HuggingFace Hub ([see this](https://huggingface.co/vasudevgupta/flax-bigbird-natural-questions)). In case you are interested in PyTorch BigBird fine-tuning, you can refer to [this repository](https://github.com/thevasudevgupta/bigbird).
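If you just want to poke at the fine-tuned checkpoint without running the full evaluation, the following sketch loads it with the `FlaxBigBirdForNaturalQuestions` class defined in [`bigbird_flax.py`](bigbird_flax.py) and extracts an answer span for a single toy question. The question/context pair is made up for illustration and is not part of Natural Questions; for a proper evaluation use `evaluate.py`.

```python
import jax.numpy as jnp
from bigbird_flax import FlaxBigBirdForNaturalQuestions
from transformers import BigBirdTokenizerFast

model_id = "vasudevgupta/flax-bigbird-natural-questions"
model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id)
tokenizer = BigBirdTokenizerFast.from_pretrained(model_id)

question = "Who wrote the novel Moby-Dick?"  # made-up example
context = "Moby-Dick is an 1851 novel by the American writer Herman Melville."
inputs = tokenizer(
    question, context, return_tensors="np", max_length=4096, padding="max_length", truncation=True
)

# The custom head returns start logits, end logits and the answer-category logits
start_logits, end_logits, pooled_logits = model(**inputs)
start = int(jnp.argmax(start_logits[0]))
end = int(jnp.argmax(end_logits[0]))
print(tokenizer.decode(inputs["input_ids"][0][start : end + 1]))
```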
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/prepare_natural_questions.py
import os import jsonlines import numpy as np from tqdm import tqdm DOC_STRIDE = 2048 MAX_LENGTH = 4096 SEED = 42 PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false") CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4} def _get_single_answer(example): def choose_first(answer, is_long_answer=False): assert isinstance(answer, list) if len(answer) == 1: answer = answer[0] return {k: [answer[k]] for k in answer} if is_long_answer else answer for a in answer: if is_long_answer: a = {k: [a[k]] for k in a} if len(a["start_token"]) > 0: break return a answer = {"id": example["id"]} annotation = example["annotations"] yes_no_answer = annotation["yes_no_answer"] if 0 in yes_no_answer or 1 in yes_no_answer: answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"] answer["start_token"] = answer["end_token"] = [] answer["start_byte"] = answer["end_byte"] = [] answer["text"] = ["<cls>"] else: answer["category"] = ["short"] out = choose_first(annotation["short_answers"]) if len(out["start_token"]) == 0: # answer will be long if short is not available answer["category"] = ["long"] out = choose_first(annotation["long_answer"], is_long_answer=True) out["text"] = [] answer.update(out) # disregard some samples if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]: answer["remove_it"] = True else: answer["remove_it"] = False cols = ["start_token", "end_token", "start_byte", "end_byte", "text"] if not all(isinstance(answer[k], list) for k in cols): raise ValueError("Issue in ID", example["id"]) return answer def get_context_and_ans(example, assertion=False): """Gives new context after removing <html> & new answer tokens as per new context""" answer = _get_single_answer(example) # bytes are of no use del answer["start_byte"] del answer["end_byte"] # handle yes_no answers explicitly if answer["category"][0] in ["yes", "no"]: # category is list with one element doc = example["document"]["tokens"] context = [] for i in range(len(doc["token"])): if not doc["is_html"][i]: context.append(doc["token"][i]) return { "context": " ".join(context), "answer": { "start_token": -100, # ignore index in cross-entropy "end_token": -100, # ignore index in cross-entropy "category": answer["category"], "span": answer["category"], # extra }, } # later, help in removing all no answers if answer["start_token"] == [-1]: return { "context": "None", "answer": { "start_token": -1, "end_token": -1, "category": "null", "span": "None", # extra }, } # handling normal samples cols = ["start_token", "end_token"] answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols}) # e.g. 
[10] == 10 doc = example["document"]["tokens"] start_token = answer["start_token"] end_token = answer["end_token"] context = [] for i in range(len(doc["token"])): if not doc["is_html"][i]: context.append(doc["token"][i]) else: if answer["start_token"] > i: start_token -= 1 if answer["end_token"] > i: end_token -= 1 new = " ".join(context[start_token:end_token]) # checking above code if assertion: """checking if above code is working as expected for all the samples""" is_html = doc["is_html"][answer["start_token"] : answer["end_token"]] old = doc["token"][answer["start_token"] : answer["end_token"]] old = " ".join([old[i] for i in range(len(old)) if not is_html[i]]) if new != old: print("ID:", example["id"]) print("New:", new, end="\n") print("Old:", old, end="\n\n") return { "context": " ".join(context), "answer": { "start_token": start_token, "end_token": end_token - 1, # this makes it inclusive "category": answer["category"], # either long or short "span": new, # extra }, } def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True): # overlap will be of doc_stride - q_len out = get_context_and_ans(example, assertion=assertion) answer = out["answer"] # later, removing these samples if answer["start_token"] == -1: return { "example_id": example["id"], "input_ids": [[-1]], "labels": { "start_token": [-1], "end_token": [-1], "category": ["null"], }, } input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids q_len = input_ids.index(tokenizer.sep_token_id) + 1 # return yes/no if answer["category"][0] in ["yes", "no"]: # category is list with one element inputs = [] category = [] q_indices = input_ids[:q_len] doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride) for i in doc_start_indices: end_index = i + max_length - q_len slice = input_ids[i:end_index] inputs.append(q_indices + slice) category.append(answer["category"][0]) if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": [-100] * len(category), "end_token": [-100] * len(category), "category": category, }, } splitted_context = out["context"].split() complete_end_token = splitted_context[answer["end_token"]] answer["start_token"] = len( tokenizer( " ".join(splitted_context[: answer["start_token"]]), add_special_tokens=False, ).input_ids ) answer["end_token"] = len( tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids ) answer["start_token"] += q_len answer["end_token"] += q_len # fixing end token num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids) if num_sub_tokens > 1: answer["end_token"] += num_sub_tokens - 1 old = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive start_token = answer["start_token"] end_token = answer["end_token"] if assertion: """This won't match exactly because of extra gaps => visaully inspect everything""" new = tokenizer.decode(old) if answer["span"] != new: print("ISSUE IN TOKENIZATION") print("OLD:", answer["span"]) print("NEW:", new, end="\n\n") if len(input_ids) <= max_length: return { "example_id": example["id"], "input_ids": [input_ids], "labels": { "start_token": [answer["start_token"]], "end_token": [answer["end_token"]], "category": answer["category"], }, } q_indices = input_ids[:q_len] doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride) inputs = [] answers_start_token = [] answers_end_token = [] 
answers_category = [] # null, yes, no, long, short for i in doc_start_indices: end_index = i + max_length - q_len slice = input_ids[i:end_index] inputs.append(q_indices + slice) assert len(inputs[-1]) <= max_length, "Issue in truncating length" if start_token >= i and end_token <= end_index - 1: start_token = start_token - i + q_len end_token = end_token - i + q_len answers_category.append(answer["category"][0]) # ["short"] -> "short" else: start_token = -100 end_token = -100 answers_category.append("null") new = inputs[-1][start_token : end_token + 1] answers_start_token.append(start_token) answers_end_token.append(end_token) if assertion: """checking if above code is working as expected for all the samples""" if new != old and new != [tokenizer.cls_token_id]: print("ISSUE in strided for ID:", example["id"]) print("New:", tokenizer.decode(new)) print("Old:", tokenizer.decode(old), end="\n\n") if slice[-1] == tokenizer.sep_token_id: break return { "example_id": example["id"], "input_ids": inputs, "labels": { "start_token": answers_start_token, "end_token": answers_end_token, "category": answers_category, }, } def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False): example = get_strided_contexts_and_ans( example, tokenizer, doc_stride=doc_stride, max_length=max_length, assertion=assertion, ) return example def save_to_disk(hf_data, file_name): with jsonlines.open(file_name, "a") as writer: for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "): labels = example["labels"] for ids, start, end, cat in zip( example["input_ids"], labels["start_token"], labels["end_token"], labels["category"], ): if start == -1 and end == -1: continue # leave waste samples with no answer if cat == "null" and np.random.rand() < 0.6: continue # removing 50 % samples writer.write( { "input_ids": ids, "start_token": start, "end_token": end, "category": CATEGORY_MAPPING[cat], } ) if __name__ == "__main__": """Running area""" from datasets import load_dataset from transformers import BigBirdTokenizer data = load_dataset("natural_questions") tokenizer = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base") data = data["train" if PROCESS_TRAIN == "true" else "validation"] fn_kwargs = { "tokenizer": tokenizer, "doc_stride": DOC_STRIDE, "max_length": MAX_LENGTH, "assertion": False, } data = data.map(prepare_inputs, fn_kwargs=fn_kwargs) data = data.remove_columns(["annotations", "document", "id", "question"]) print(data) np.random.seed(SEED) cache_file_name = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl" save_to_disk(data, file_name=cache_file_name)
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/bigbird_flax.py
import json import os from dataclasses import dataclass from functools import partial from typing import Callable import flax.linen as nn import jax import jax.numpy as jnp import joblib import optax import wandb from flax import jax_utils, struct, traverse_util from flax.serialization import from_bytes, to_bytes from flax.training import train_state from flax.training.common_utils import shard from tqdm.auto import tqdm from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule): """ BigBirdForQuestionAnswering with CLS Head over the top for predicting category This way we can load its weights with FlaxBigBirdForQuestionAnswering """ config: BigBirdConfig dtype: jnp.dtype = jnp.float32 add_pooling_layer: bool = True def setup(self): super().setup() self.cls = nn.Dense(5, dtype=self.dtype) def __call__(self, *args, **kwargs): outputs = super().__call__(*args, **kwargs) cls_out = self.cls(outputs[2]) return outputs[:2] + (cls_out,) class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering): module_class = FlaxBigBirdForNaturalQuestionsModule def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels): def cross_entropy(logits, labels, reduction=None): """ Args: logits: bsz, seqlen, vocab_size labels: bsz, seqlen """ vocab_size = logits.shape[-1] labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4") logits = jax.nn.log_softmax(logits, axis=-1) loss = -jnp.sum(labels * logits, axis=-1) if reduction is not None: loss = reduction(loss) return loss cross_entropy = partial(cross_entropy, reduction=jnp.mean) start_loss = cross_entropy(start_logits, start_labels) end_loss = cross_entropy(end_logits, end_labels) pooled_loss = cross_entropy(pooled_logits, pooler_labels) return (start_loss + end_loss + pooled_loss) / 3 @dataclass class Args: model_id: str = "google/bigbird-roberta-base" logging_steps: int = 3000 save_steps: int = 10500 block_size: int = 128 num_random_blocks: int = 3 batch_size_per_device: int = 1 max_epochs: int = 5 # tx_args lr: float = 3e-5 init_lr: float = 0.0 warmup_steps: int = 20000 weight_decay: float = 0.0095 save_dir: str = "bigbird-roberta-natural-questions" base_dir: str = "training-expt" tr_data_path: str = "data/nq-training.jsonl" val_data_path: str = "data/nq-validation.jsonl" def __post_init__(self): os.makedirs(self.base_dir, exist_ok=True) self.save_dir = os.path.join(self.base_dir, self.save_dir) self.batch_size = self.batch_size_per_device * jax.device_count() @dataclass class DataCollator: pad_id: int max_length: int = 4096 # no dynamic padding on TPUs def __call__(self, batch): batch = self.collate_fn(batch) batch = jax.tree_util.tree_map(shard, batch) return batch def collate_fn(self, features): input_ids, attention_mask = self.fetch_inputs(features["input_ids"]) batch = { "input_ids": jnp.array(input_ids, dtype=jnp.int32), "attention_mask": jnp.array(attention_mask, dtype=jnp.int32), "start_labels": jnp.array(features["start_token"], dtype=jnp.int32), "end_labels": jnp.array(features["end_token"], dtype=jnp.int32), "pooled_labels": jnp.array(features["category"], dtype=jnp.int32), } return batch def fetch_inputs(self, input_ids: list): inputs = [self._fetch_inputs(ids) for ids in input_ids] return zip(*inputs) def _fetch_inputs(self, input_ids: list): attention_mask = [1 for _ in 
range(len(input_ids))] while len(input_ids) < self.max_length: input_ids.append(self.pad_id) attention_mask.append(0) return input_ids, attention_mask def get_batched_dataset(dataset, batch_size, seed=None): if seed is not None: dataset = dataset.shuffle(seed=seed) for i in range(len(dataset) // batch_size): batch = dataset[i * batch_size : (i + 1) * batch_size] yield dict(batch) @partial(jax.pmap, axis_name="batch") def train_step(state, drp_rng, **model_inputs): def loss_fn(params): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True) start_logits, end_logits, pooled_logits = outputs return state.loss_fn( start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, ) drp_rng, new_drp_rng = jax.random.split(drp_rng) grad_fn = jax.value_and_grad(loss_fn) loss, grads = grad_fn(state.params) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") grads = jax.lax.pmean(grads, "batch") state = state.apply_gradients(grads=grads) return state, metrics, new_drp_rng @partial(jax.pmap, axis_name="batch") def val_step(state, **model_inputs): start_labels = model_inputs.pop("start_labels") end_labels = model_inputs.pop("end_labels") pooled_labels = model_inputs.pop("pooled_labels") outputs = state.apply_fn(**model_inputs, params=state.params, train=False) start_logits, end_logits, pooled_logits = outputs loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels) metrics = jax.lax.pmean({"loss": loss}, axis_name="batch") return metrics class TrainState(train_state.TrainState): loss_fn: Callable = struct.field(pytree_node=False) @dataclass class Trainer: args: Args data_collator: Callable train_step_fn: Callable val_step_fn: Callable model_save_fn: Callable logger: wandb scheduler_fn: Callable = None def create_state(self, model, tx, num_train_steps, ckpt_dir=None): params = model.params state = TrainState.create( apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, ) if ckpt_dir is not None: params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": num_train_steps, "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) state = train_state.TrainState( step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, ) self.args = args self.data_collator = data_collator self.scheduler_fn = lr model.params = params state = jax_utils.replicate(state) return state def train(self, state, tr_dataset, val_dataset): args = self.args total = len(tr_dataset) // args.batch_size rng = jax.random.PRNGKey(0) drp_rng = jax.random.split(rng, jax.device_count()) for epoch in range(args.max_epochs): running_loss = jnp.array(0, dtype=jnp.float32) tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch) i = 0 for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"): batch = self.data_collator(batch) state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 if i % args.logging_steps == 0: state_step = jax_utils.unreplicate(state.step) tr_loss = running_loss.item() / i lr = self.scheduler_fn(state_step - 1) eval_loss = self.evaluate(state, val_dataset) logging_dict = { "step": 
state_step.item(), "eval_loss": eval_loss.item(), "tr_loss": tr_loss, "lr": lr.item(), } tqdm.write(str(logging_dict)) self.logger.log(logging_dict, commit=True) if i % args.save_steps == 0: self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state) def evaluate(self, state, dataset): dataloader = get_batched_dataset(dataset, self.args.batch_size) total = len(dataset) // self.args.batch_size running_loss = jnp.array(0, dtype=jnp.float32) i = 0 for batch in tqdm(dataloader, total=total, desc="Evaluating ... "): batch = self.data_collator(batch) metrics = self.val_step_fn(state, **batch) running_loss += jax_utils.unreplicate(metrics["loss"]) i += 1 return running_loss / i def save_checkpoint(self, save_dir, state): state = jax_utils.unreplicate(state) print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ") self.model_save_fn(save_dir, params=state.params) with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f: f.write(to_bytes(state.opt_state)) joblib.dump(self.args, os.path.join(save_dir, "args.joblib")) joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "w") as f: json.dump({"step": state.step.item()}, f) print("DONE") def restore_checkpoint(save_dir, state): print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ") with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f: params = from_bytes(state.params, f.read()) with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f: opt_state = from_bytes(state.opt_state, f.read()) args = joblib.load(os.path.join(save_dir, "args.joblib")) data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib")) with open(os.path.join(save_dir, "training_state.json"), "r") as f: training_state = json.load(f) step = training_state["step"] print("DONE") return params, opt_state, step, args, data_collator def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps): decay_steps = num_train_steps - warmup_steps warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps) decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps) lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps]) return lr def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay): def weight_decay_mask(params): params = traverse_util.flatten_dict(params) mask = {k: (v[-1] != "bias" and v[-2:] != ("LayerNorm", "scale")) for k, v in params.items()} return traverse_util.unflatten_dict(mask) lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps) tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask) return tx, lr
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/sweep_flax.yaml
command: - python3 - train.py method: random parameters: lr: values: [4e-5, 3e-5] warmup_steps: values: [20000, 15000, 10000, 5000] weight_decay: distribution: normal mu: 1e-2 sigma: 2e-3 metric: name: eval_loss goal: minimize
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/evaluate.py
import jax import jax.numpy as jnp from bigbird_flax import FlaxBigBirdForNaturalQuestions from datasets import load_from_disk from transformers import BigBirdTokenizerFast CATEGORY_MAPPING = {0: "null", 1: "short", 2: "long", 3: "yes", 4: "no"} PUNCTUATION_SET_TO_EXCLUDE = set("".join(["‘", "’", "´", "`", ".", ",", "-", '"'])) def get_sub_answers(answers, begin=0, end=None): return [" ".join(x.split(" ")[begin:end]) for x in answers if len(x.split(" ")) > 1] def expand_to_aliases(given_answers, make_sub_answers=False): if make_sub_answers: # if answers are longer than one word, make sure a predictions is correct if it coresponds to the complete 1: or :-1 sub word # *e.g.* if the correct answer contains a prefix such as "the", or "a" given_answers = ( given_answers + get_sub_answers(given_answers, begin=1) + get_sub_answers(given_answers, end=-1) ) answers = [] for answer in given_answers: alias = answer.replace("_", " ").lower() alias = "".join(c if c not in PUNCTUATION_SET_TO_EXCLUDE else " " for c in alias) answers.append(" ".join(alias.split()).strip()) return set(answers) def get_best_valid_start_end_idx(start_scores, end_scores, top_k=1, max_size=100): best_start_scores, best_start_idx = jax.lax.top_k(start_scores, top_k) best_end_scores, best_end_idx = jax.lax.top_k(end_scores, top_k) widths = best_end_idx[:, None] - best_start_idx[None, :] mask = jnp.logical_or(widths < 0, widths > max_size) scores = (best_end_scores[:, None] + best_start_scores[None, :]) - (1e8 * mask) best_score = jnp.argmax(scores).item() return best_start_idx[best_score % top_k], best_end_idx[best_score // top_k] def format_dataset(sample): question = sample["question"]["text"] context = sample["document"]["tokens"]["token"] is_html = sample["document"]["tokens"]["is_html"] long_answers = sample["annotations"]["long_answer"] short_answers = sample["annotations"]["short_answers"] context_string = " ".join([context[i] for i in range(len(context)) if not is_html[i]]) # 0 - No ; 1 - Yes for answer in sample["annotations"]["yes_no_answer"]: if answer == 0 or answer == 1: return { "question": question, "context": context_string, "short": [], "long": [], "category": "no" if answer == 0 else "yes", } short_targets = [] for s in short_answers: short_targets.extend(s["text"]) short_targets = list(set(short_targets)) long_targets = [] for s in long_answers: if s["start_token"] == -1: continue answer = context[s["start_token"] : s["end_token"]] html = is_html[s["start_token"] : s["end_token"]] new_answer = " ".join([answer[i] for i in range(len(answer)) if not html[i]]) if new_answer not in long_targets: long_targets.append(new_answer) category = "long_short" if len(short_targets + long_targets) > 0 else "null" return { "question": question, "context": context_string, "short": short_targets, "long": long_targets, "category": category, } def main(): dataset = load_from_disk("natural-questions-validation") dataset = dataset.map(format_dataset).remove_columns(["annotations", "document", "id"]) print(dataset) short_validation_dataset = dataset.filter(lambda x: (len(x["question"]) + len(x["context"])) < 4 * 4096) short_validation_dataset = short_validation_dataset.filter(lambda x: x["category"] != "null") short_validation_dataset model_id = "vasudevgupta/flax-bigbird-natural-questions" model = FlaxBigBirdForNaturalQuestions.from_pretrained(model_id) tokenizer = BigBirdTokenizerFast.from_pretrained(model_id) @jax.jit def forward(*args, **kwargs): start_logits, end_logits, pooled_logits = model(*args, **kwargs) return 
start_logits, end_logits, jnp.argmax(pooled_logits, axis=-1) def evaluate(example): # encode question and context so that they are separated by a tokenizer.sep_token and cut at max_length inputs = tokenizer( example["question"], example["context"], return_tensors="np", max_length=4096, padding="max_length", truncation=True, ) start_scores, end_scores, category = forward(**inputs) predicted_category = CATEGORY_MAPPING[category.item()] example["targets"] = example["long"] + example["short"] if example["category"] in ["yes", "no", "null"]: example["targets"] = [example["category"]] example["has_tgt"] = example["category"] != "null" # Now target can be: "yes", "no", "null", "list of long & short answers" if predicted_category in ["yes", "no", "null"]: example["output"] = [predicted_category] example["match"] = example["output"] == example["targets"] example["has_pred"] = predicted_category != "null" return example max_size = 38 if predicted_category == "short" else 1024 start_score, end_score = get_best_valid_start_end_idx( start_scores[0], end_scores[0], top_k=8, max_size=max_size ) input_ids = inputs["input_ids"][0].tolist() example["output"] = [tokenizer.decode(input_ids[start_score : end_score + 1])] answers = expand_to_aliases(example["targets"], make_sub_answers=True) predictions = expand_to_aliases(example["output"]) # some preprocessing to both prediction and answer answers = {"".join(a.split()) for a in answers} predictions = {"".join(p.split()) for p in predictions} predictions = {s for s in predictions if s not in ["``", "''", "`", "'"]} # if there is a common element, it's a exact match example["match"] = len(list(answers & predictions)) > 0 example["has_pred"] = predicted_category != "null" and len(predictions) > 0 return example short_validation_dataset = short_validation_dataset.map(evaluate) total = len(short_validation_dataset) matched = len(short_validation_dataset.filter(lambda x: x["match"] == 1)) print("EM score:", (matched / total) * 100, "%") if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/big_bird/train.py
import os from dataclasses import replace import jax import wandb from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step from datasets import load_dataset from flax import jax_utils from transformers import BigBirdTokenizerFast if __name__ == "__main__": print("#################### AVAILABLE DEVICES ####################") print(jax.devices()) print("###########################################################") # setup for wandb sweep args = Args() logger = wandb.init(project="bigbird-natural-questions", config=args.__dict__) wandb_args = dict(logger.config) del wandb_args["batch_size"] args = replace(args, **wandb_args) base_dir = args.base_dir + "-" + wandb.run.id args = replace(args, base_dir=base_dir) print(args) tr_dataset = load_dataset("json", data_files=args.tr_data_path)["train"] val_dataset = load_dataset("json", data_files=args.val_data_path)["train"] # drop extra batch for now indices = range(len(tr_dataset) - len(tr_dataset) % args.batch_size) tr_dataset = tr_dataset.shuffle().select(indices) indices = range(len(val_dataset) - len(val_dataset) % args.batch_size) val_dataset = val_dataset.shuffle().select(indices) if os.environ.get("TRAIN_ON_SMALL", "false") == "true": tr_dataset = tr_dataset.shuffle().select(range(80000)) val_dataset = val_dataset.shuffle().select(range(8000)) print(tr_dataset) print(val_dataset) model = FlaxBigBirdForNaturalQuestions.from_pretrained( args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks ) tokenizer = BigBirdTokenizerFast.from_pretrained(args.model_id) data_collator = DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": args.max_epochs * (len(tr_dataset) // args.batch_size), "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) trainer = Trainer( args=args, data_collator=data_collator, model_save_fn=model.save_pretrained, train_step_fn=train_step, val_step_fn=val_step, logger=logger, scheduler_fn=lr, ) ckpt_dir = None state = trainer.create_state(model, tx, num_train_steps=tx_args["num_train_steps"], ckpt_dir=ckpt_dir) try: trainer.train(state, tr_dataset, val_dataset) except KeyboardInterrupt: print("Oooops; TRAINING STOPPED UNFORTUNATELY") print("SAVING WEIGHTS IN `final-weights`") params = jax_utils.unreplicate(state.params) model.save_pretrained(os.path.join(args.base_dir, "final-weights"), params=params)
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/dataset-streaming/README.md
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Language model training examples in streaming mode The following examples showcase how to train a language model from scratch using the JAX/Flax backend. JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism. All of the following examples make use of [dataset streaming](https://huggingface.co/docs/datasets/master/dataset_streaming.html), therefore allowing you to train models on massive datasets without ever having to download the full dataset. ## Masked language modeling In the following, we demonstrate how to train a bi-directional transformer model using the masked language modeling objective as introduced in [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805). More specifically, we demonstrate how JAX/Flax and dataset streaming can be leveraged to pre-train [**`roberta-base`**](https://huggingface.co/roberta-base) in English on a single TPUv3-8 pod for 10000 update steps. The example script uses the 🤗 Datasets library. You can easily customize it to your needs if you need extra processing on your datasets. Let's start by creating a model repository to save the trained model and logs. Here we call the model `"english-roberta-base-dummy"`, but you can change the model name as you like. You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line: ```bash huggingface-cli repo create english-roberta-base-dummy ``` Next we clone the model repository to add the tokenizer and model files. ```bash git clone https://huggingface.co/<your-username>/english-roberta-base-dummy ``` To ensure that all tensorboard traces will be uploaded correctly, we need to track them. You can run the following command inside your model repo to do so. ```bash cd english-roberta-base-dummy git lfs track "*tfevents*" ``` Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. Next, let's add a symbolic link to `run_mlm_flax_stream.py`. ```bash export MODEL_DIR="./english-roberta-base-dummy" ln -s ~/transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py ./ ``` ### Copy config and tokenizer of existing model In this example, we will simply copy an existing config and tokenizer in English. You can run the following code in a Python shell to do so.
```python from transformers import RobertaTokenizerFast, RobertaConfig model_dir = "./english-roberta-base-dummy" tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base") config = RobertaConfig.from_pretrained("roberta-base") tokenizer.save_pretrained(model_dir) config.save_pretrained(model_dir) ``` ### Train model Next we can run the example script to pretrain the model. Compared to the default [`run_mlm_flax`](https://github.com/huggingface/transformers/blob/main/examples/flax/language-modeling/run_mlm_flax.py), we introduced 4 new training settings: - `num_train_steps` - how many update steps should be run. - `num_eval_samples` - how many training samples should be taken for evaluation. - `logging_steps` - at what rate should the training loss be logged. - `eval_steps` - at what rate should evaluation be run. 10K update steps ```bash ./run_mlm_flax_stream.py \ --output_dir="${MODEL_DIR}" \ --model_type="roberta" \ --config_name="${MODEL_DIR}" \ --tokenizer_name="${MODEL_DIR}" \ --dataset_name="oscar" \ --dataset_config_name="unshuffled_deduplicated_en" \ --max_seq_length="128" \ --per_device_train_batch_size="128" \ --per_device_eval_batch_size="128" \ --learning_rate="3e-4" \ --warmup_steps="1000" \ --overwrite_output_dir \ --adam_beta1="0.9" \ --adam_beta2="0.98" \ --num_train_steps="10000" \ --num_eval_samples="5000" \ --logging_steps="250" \ --eval_steps="1000" \ --push_to_hub ```
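For reference, the core of the streaming setup that `run_mlm_flax_stream.py` performs internally can be reproduced in a few lines. The following is a minimal sketch (assuming the `oscar` configuration and the `roberta-base` tokenizer used above), not the full preprocessing done by the script: ```python
from datasets import load_dataset
from transformers import AutoTokenizer

# `streaming=True` returns an iterable dataset; nothing is downloaded up front.
dataset = load_dataset("oscar", "unshuffled_deduplicated_en", split="train", streaming=True)
tokenizer = AutoTokenizer.from_pretrained("roberta-base")

def tokenize_function(examples):
    # `special_tokens_mask` is later consumed by the MLM data collator.
    return tokenizer(examples["text"], return_special_tokens_mask=True)

tokenized = dataset.map(tokenize_function, batched=True, remove_columns=["id", "text"])
# Shuffling happens inside a fixed-size buffer, since the full dataset never sits on disk.
tokenized = tokenized.shuffle(buffer_size=10_000, seed=42)

for i, sample in enumerate(tokenized):
    print(len(sample["input_ids"]))
    if i == 2:
        break
```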
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import logging import os import sys import time from collections import defaultdict from dataclasses import dataclass, field # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import datasets import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) if datasets.__version__ <= "1.8.0": raise ValueError("Make sure to upgrade `datasets` to a version >= 1.9.0 to use dataset streaming") MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the column to retrieve the training text."} ) shuffle_buffer_size: int = field( default=10000, metadata={"help": "The number of examples to pre-load for shuffling."} ) num_train_steps: int = field(default=50000, metadata={"help": "The number of training steps."}) num_eval_samples: int = field(default=50000, metadata={"help": "The number of samples to be used for evaluation"}) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." @flax.struct.dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. 
mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm_probability: float = 0.15 def __post_init__(self): if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[jnp.ndarray, jnp.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: num_samples = len(samples_idx) samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length): """ The training iterator is advanced so that after groupifying the samples, `num_samples` of length `max_seq_length` are returned. 
""" num_total_tokens = max_seq_length * num_samples samples = defaultdict(list) i = 0 while i < num_total_tokens: tokenized_samples = next(train_iterator) i += len(tokenized_samples["input_ids"]) # concatenate tokenized samples to list (excluding "id" and "text") samples = { k: samples[k] + tokenized_samples[k] for k in ["input_ids", "attention_mask", "special_tokens_mask"] } # Concatenated tokens are split to lists of length `max_seq_length`. # Note that remainedr of % max_seq_length are thrown away. def group_texts(examples): result = { k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)] for k, t in examples.items() } return result grouped_samples = group_texts(samples) return grouped_samples def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) if __name__ == "__main__": # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level="INFO", datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, streaming=True, split="train", ) if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more # efficient when it receives the `special_tokens_mask`. def tokenize_function(examples): return tokenizer(examples[data_args.text_column_name], return_special_tokens_mask=True) tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=list(dataset.features.keys())) shuffle_seed = training_args.seed tokenized_datasets = tokenized_datasets.shuffle(buffer_size=data_args.shuffle_buffer_size, seed=shuffle_seed) has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) if model_args.model_name_or_path: model = FlaxAutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) else: model = FlaxAutoModelForMaskedLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() # define number steps per stream epoch num_train_steps = data_args.num_train_steps # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. # Note that this mask is specifically adapted for FlaxBERT-like models. # For other models, one should correct the layer norm parameter naming # accordingly. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) # Define gradient update step fn def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() / label_mask.sum() return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean( {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" ) return new_state, metrics, new_dropout_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, 
logits.shape[-1])) * label_mask # compute accuracy accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 train_start = time.time() train_metrics = [] eval_metrics = [] training_iter = iter(tokenized_datasets) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) steps = tqdm(range(num_train_steps), desc="Training...", position=0) for step in range(num_train_steps): # ======================== Training ================================ try: samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) except StopIteration: # Once the end of the dataset stream is reached, the training iterator # is reinitialized and reshuffled and a new eval dataset is randomly chosen. shuffle_seed += 1 tokenized_datasets.set_epoch(shuffle_seed) training_iter = iter(tokenized_datasets) eval_dataset = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) # process input samples model_inputs = data_collator(samples) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) train_metrics.append(train_metric) if step % training_args.logging_steps == 0 and step > 0: steps.write( f"Step... ({step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" f" {train_metric['learning_rate'].mean()})" ) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, step) train_metrics = [] # ======================== Evaluating ============================== if step % training_args.eval_steps == 0 and step > 0: # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(data_args.num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=1)): # process input samples batch_eval_samples = {k: [v[idx] for idx in batch_idx] for k, v in eval_samples.items()} model_inputs = data_collator(batch_eval_samples) # Model forward model_inputs = shard(model_inputs.data) metrics = p_eval_step(state.params, model_inputs) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar steps.desc = ( f"Step... 
({step + 1}/{num_train_steps} | Loss: {eval_metrics['loss']}, Acc:" f" {eval_metrics['accuracy']})" ) if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, step) eval_metrics = [] # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of step {step+1}", ) # update tqdm bar steps.update(1)
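As a quick sanity check of the masking logic in `FlaxDataCollatorForLanguageModeling` above, the collator can be called directly on a couple of tokenized examples. This is a minimal sketch; it assumes the dependencies of the script are installed and that the file is importable as a module named `run_mlm_flax_stream`: ```python
from transformers import AutoTokenizer

from run_mlm_flax_stream import FlaxDataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("roberta-base")
collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=0.15)

examples = [
    tokenizer("Dataset streaming keeps memory usage flat.", return_special_tokens_mask=True),
    tokenizer("Masked language modeling hides a fraction of the tokens.", return_special_tokens_mask=True),
]
batch = collator(examples)

print(batch["input_ids"].shape)
# Positions whose label is not -100 are the masked tokens the model must recover.
print((batch["labels"] != -100).sum())
```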
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/hybrid_clip/requirements.txt
jax>=0.2.8 jaxlib>=0.1.59 flax>=0.3.5 optax>=0.0.8 -f https://download.pytorch.org/whl/torch_stable.html torch==1.9.0+cpu -f https://download.pytorch.org/whl/torch_stable.html torchvision==0.10.0+cpu
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/hybrid_clip/configuration_hybrid_clip.py
import copy from transformers.configuration_utils import PretrainedConfig from transformers.utils import logging logger = logging.get_logger(__name__) class HybridCLIPConfig(PretrainedConfig): r""" :class:`HybridCLIPConfig` is the configuration class to store the configuration of a :class:`~HybridCLIPModel`. It is used to instantiate HybridCLIPModel model according to the specified arguments, defining the text model and vision model configs. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: text_config_dict (:obj:`dict`): Dictionary of configuration options that defines text model config. vision_config_dict (:obj:`dict`): Dictionary of configuration options that defines vison model config. projection_dim (:obj:`int`, `optional`, defaults to 512): Dimentionality of text and vision projection layers. kwargs (`optional`): Dictionary of keyword arguments. Examples:: >>> from transformers import BertConfig, CLIPConfig, HybridCLIPConfig, FlaxHybridCLIP >>> # Initializing a BERT and CLIP configuration >>> config_text = BertConfig() >>> config_vision = CLIPConfig() >>> config = HybridCLIPConfig.from_text_vision_configs(config_text, config_vision, projection_dim=512) >>> # Initializing a BERT and CLIPVision model >>> model = EncoderDecoderModel(config=config) >>> # Accessing the model configuration >>> config_text = model.config.text_config >>> config_vision = model.config.vision_config >>> # Saving the model, including its configuration >>> model.save_pretrained('my-model') >>> # loading model and config from pretrained folder >>> encoder_decoder_config = HybridCLIPConfig.from_pretrained('my-model') >>> model = FlaxHybridCLIP.from_pretrained('my-model', config=encoder_decoder_config) """ model_type = "hybrid-clip" is_composition = True def __init__(self, projection_dim=512, **kwargs): super().__init__(**kwargs) if "text_config" not in kwargs: raise ValueError("`text_config` can not be `None`.") if "vision_config" not in kwargs: raise ValueError("`vision_config` can not be `None`.") text_config = kwargs.pop("text_config") vision_config = kwargs.pop("vision_config") text_model_type = text_config.pop("model_type") vision_model_type = vision_config.pop("model_type") from transformers import AutoConfig self.text_config = AutoConfig.for_model(text_model_type, **text_config) if vision_model_type == "clip": self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config).vision_config elif vision_model_type == "clip_vision_model": from transformers import CLIPVisionConfig self.vision_config = CLIPVisionConfig(**vision_config) else: self.vision_config = AutoConfig.for_model(vision_model_type, **vision_config) self.projection_dim = projection_dim self.initializer_factor = 1.0 @classmethod def from_text_vision_configs(cls, text_config: PretrainedConfig, vision_config: PretrainedConfig, **kwargs): r""" Instantiate a :class:`HybridCLIPConfig` (or a derived class) from text model configuration and vision model configuration. Returns: :class:`HybridCLIPConfig`: An instance of a configuration object """ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs) def to_dict(self): """ Serializes this instance to a Python dictionary. Override the default :meth:`~transformers.PretrainedConfig.to_dict`. 
Returns: :obj:`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance, """ output = copy.deepcopy(self.__dict__) output["text_config"] = self.text_config.to_dict() output["vision_config"] = self.vision_config.to_dict() output["model_type"] = self.__class__.model_type return output
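For reference, here is a short sketch of composing a config from existing text and vision configs and round-tripping it with the model class. Note that the docstring example above instantiates `EncoderDecoderModel`, but the class that actually consumes this config is `FlaxHybridCLIP` (defined in `modeling_hybrid_clip.py`): ```python
from transformers import BertConfig, CLIPConfig

from configuration_hybrid_clip import HybridCLIPConfig
from modeling_hybrid_clip import FlaxHybridCLIP

config = HybridCLIPConfig.from_text_vision_configs(BertConfig(), CLIPConfig(), projection_dim=512)
model = FlaxHybridCLIP(config)  # randomly initialized text and vision towers

model.save_pretrained("my-model")  # saves the composed config together with the weights
config_reloaded = HybridCLIPConfig.from_pretrained("my-model")
model_reloaded = FlaxHybridCLIP.from_pretrained("my-model", config=config_reloaded)
```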
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/hybrid_clip/README.md
<!--- Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Vision-Text dual encoder model training examples > Note: This example is experimental and might not give the best possible results The following example showcases how to train a CLIP-like vision-text dual encoder model from pre-trained vision and text encoders using the JAX/Flax backend. Such a model can be used for natural language image search and potentially zero-shot image classification. The model is inspired by the [CLIP](https://openai.com/blog/clip/) approach, introduced by Alec Radford et al. The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their captions into the same embedding space, such that the caption embeddings are located near the embeddings of the images they describe. JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU. Models written in JAX/Flax are **immutable** and updated in a purely functional way which enables simple and efficient model parallelism. In this example we will use the vision model from [CLIP](https://huggingface.co/models?filter=clip) as the image encoder and [`roberta-base`](https://huggingface.co/roberta-base) as the text encoder. Note that one can also use the [ViT](https://huggingface.co/models?filter=vit) model as the image encoder and any other BERT or RoBERTa model as the text encoder. To train the model on languages other than English one should choose a text encoder trained on the desired language and an image-text dataset in that language. One such dataset is [WIT](https://github.com/google-research-datasets/wit). Let's start by creating a model repository to save the trained model and logs. Here we call the model `"clip-roberta-base"`, but you can change the model name as you like. You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that you are logged in) or via the command line: ``` huggingface-cli repo create clip-roberta-base ``` Next we clone the model repository to add the tokenizer and model files. ``` git clone https://huggingface.co/<your-username>/clip-roberta-base ``` To ensure that all tensorboard traces will be uploaded correctly, we need to track them. You can run the following command inside your model repo to do so. ``` cd clip-roberta-base git lfs track "*tfevents*" ``` Great, we have set up our model repository. During training, we will automatically push the training logs and model weights to the repo. Next, let's add a symbolic link to `run_hybrid_clip.py`. ```bash export MODEL_DIR="./clip-roberta-base" ln -s ~/transformers/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py run_hybrid_clip.py ``` ## How to use the `FlaxHybridCLIP` model: The `FlaxHybridCLIP` class lets you load any text and vision encoder model to create a dual encoder. Here is an example of how to load the model using pre-trained text and vision models.
```python from modeling_hybrid_clip import FlaxHybridCLIP model = FlaxHybridCLIP.from_text_vision_pretrained("bert-base-uncased", "openai/clip-vit-base-patch32") # save the model model.save_pretrained("bert-clip") # load the saved model model = FlaxHybridCLIP.from_pretrained("bert-clip") ``` If the checkpoints are in PyTorch then one could pass `text_from_pt=True` and `vision_from_pt=True`. This will load the PyTorch checkpoints, convert them to Flax, and load the model. ```python model = FlaxHybridCLIP.from_text_vision_pretrained("bert-base-uncased", "openai/clip-vit-base-patch32", text_from_pt=True, vision_from_pt=True) ``` This loads both the text and vision encoders using pre-trained weights; the projection layers are randomly initialized, except for CLIP's vision model. If you use CLIP to initialize the vision model then the vision projection weights are also loaded using the pre-trained weights. ## Prepare the dataset We will use the MS-COCO dataset to train our dual encoder model. MS-COCO contains over 82,000 images, each of which has at least 5 different caption annotations. The dataset is usually used for image captioning tasks, but we can repurpose the image-caption pairs to train our dual encoder model for image search. ### Download and extract the data. The data consists of two compressed folders: one with images and the other with the associated image captions. Note that the compressed images folder is 13GB in size. ```bash wget http://images.cocodataset.org/annotations/annotations_trainval2014.zip wget http://images.cocodataset.org/zips/train2014.zip unzip annotations_trainval2014.zip unzip train2014.zip mkdir coco_dataset mv train2014 coco_dataset/ mv annotations coco_dataset/ ``` ### Prepare dataset files and split the dataset. ```python import json import collections images_dir = "coco_dataset/train2014" annotation_file = "coco_dataset/annotations/captions_train2014.json" with open(annotation_file, "r") as f: annotations = json.load(f)["annotations"] image_path_to_caption = collections.defaultdict(list) for element in annotations: caption = f"{element['caption'].lower().rstrip('.')}" image_path = images_dir + "/COCO_train2014_" + "%012d.jpg" % (element["image_id"]) image_path_to_caption[image_path].append(caption) lines = [] for image_path, captions in image_path_to_caption.items(): lines.append(json.dumps({"image_path": image_path, "captions": captions})) train_lines = lines[:-8000] valid_line = lines[-8000:] with open("coco_dataset/train_dataset.json", "w") as f: f.write("\n".join(train_lines)) with open("coco_dataset/valid_dataset.json", "w") as f: f.write("\n".join(valid_line)) ``` > Note: The data loading and processing part of this script can still be improved for maximum performance. In particular, one should decode the images beforehand and use those instead of decoding them each time. If the dataset is small, or if you have a lot of disk space, you could also pre-process the whole dataset beforehand and then use it.
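To sanity-check the files produced above before training, you can read back one line of `coco_dataset/train_dataset.json` and decode the referenced image, roughly mirroring what the `ImageTextDataset` class in `run_hybrid_clip.py` does. This is a minimal sketch, not part of the original instructions: ```python
import json

from torchvision.io import ImageReadMode, read_image

with open("coco_dataset/train_dataset.json") as f:
    example = json.loads(f.readline())

print(example["captions"][:2])                     # a couple of captions for this image
image = read_image(example["image_path"], mode=ImageReadMode.RGB)
print(image.shape)                                 # uint8 tensor of shape (3, H, W)
```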
## Train the model Next we can run the example script to train the model: ```bash python run_hybrid_clip.py \ --output_dir ${MODEL_DIR} \ --text_model_name_or_path="roberta-base" \ --vision_model_name_or_path="openai/clip-vit-base-patch32" \ --tokenizer_name="roberta-base" \ --train_file="coco_dataset/train_dataset.json" \ --validation_file="coco_dataset/valid_dataset.json" \ --do_train --do_eval \ --num_train_epochs="40" --max_seq_length 96 \ --per_device_train_batch_size="64" \ --per_device_eval_batch_size="64" \ --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \ --overwrite_output_dir \ --preprocessing_num_workers 32 \ --push_to_hub ``` This should finish in roughly 1 hour 50 minutes with a minimum validation loss of 2.43. Training statistics can be accessed on [tensorboard.dev](https://tensorboard.dev/experiment/RUNPYd1yRgSD5kZSb9hDig/#scalars).
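After training, the pushed checkpoint can be used to embed captions and images into the shared space for retrieval. The following is a minimal sketch: the repository name is a placeholder, and `pixel_values` is assumed to come from the same torchvision `Transform` preprocessing used in `run_hybrid_clip.py`: ```python
import jax.numpy as jnp
from transformers import AutoTokenizer

from modeling_hybrid_clip import FlaxHybridCLIP

model = FlaxHybridCLIP.from_pretrained("<your-username>/clip-roberta-base")
tokenizer = AutoTokenizer.from_pretrained("roberta-base")  # the text tokenizer used for training

inputs = tokenizer(["two dogs playing in the snow"], padding="max_length", max_length=96, return_tensors="np")
text_embeds = model.get_text_features(inputs["input_ids"], inputs["attention_mask"])
text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True)

# `pixel_values` is assumed to be a float array of shape (batch, image_size, image_size, 3),
# preprocessed with the same normalization as during training (see `Transform` in run_hybrid_clip.py).
# image_embeds = model.get_image_features(pixel_values)
# image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True)
# scores = jnp.matmul(text_embeds, image_embeds.T)  # cosine similarities used for ranking
```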
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/hybrid_clip/modeling_hybrid_clip.py
# coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp from configuration_hybrid_clip import HybridCLIPConfig from flax.core.frozen_dict import FrozenDict from transformers import FLAX_MODEL_MAPPING, FlaxCLIPVisionModel from transformers.modeling_flax_utils import FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPOutput from transformers.utils import logging logger = logging.get_logger(__name__) class FlaxHybridCLIPModule(nn.Module): config: HybridCLIPConfig dtype: jnp.dtype = jnp.float32 def setup(self): text_config = self.config.text_config vision_config = self.config.vision_config self.projection_dim = self.config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size text_module = FLAX_MODEL_MAPPING[self.config.text_config.__class__].module_class vision_module = FLAX_MODEL_MAPPING.get(self.config.vision_config.__class__, FlaxCLIPVisionModel).module_class self.text_model = text_module(text_config, dtype=self.dtype) self.vision_model = vision_module(vision_config, dtype=self.dtype) self.visual_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.text_projection = nn.Dense( self.projection_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.02), use_bias=False, ) self.logit_scale = self.param("logit_scale", jax.nn.initializers.ones, []) def __call__( self, input_ids=None, pixel_values=None, attention_mask=None, position_ids=None, token_type_ids=None, deterministic: bool = True, output_attentions=None, output_hidden_states=None, return_dict=None, ): return_dict = return_dict if return_dict is not None else self.config.return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True) text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True) # cosine similarity as logits logit_scale = jnp.exp(self.logit_scale) logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale logits_per_image = logits_per_text.T if not return_dict: return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return FlaxCLIPOutput( 
logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) class FlaxHybridCLIP(FlaxPreTrainedModel): config_class = HybridCLIPConfig module_class = FlaxHybridCLIPModule def __init__( self, config: HybridCLIPConfig, input_shape: Optional[Tuple] = None, seed: int = 0, dtype: jnp.dtype = jnp.float32, **kwargs, ): if input_shape is None: input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3)) module = self.module_class(config=config, dtype=dtype, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensor input_ids = jnp.zeros(input_shape[0], dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0]) token_type_ids = jnp.ones_like(input_ids) attention_mask = jnp.ones_like(input_ids) pixel_values = jax.random.normal(rng, input_shape[1]) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids, token_type_ids)["params"] def __call__( self, input_ids, pixel_values, attention_mask=None, position_ids=None, token_type_ids=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(pixel_values, dtype=jnp.float32), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, output_attentions, output_hidden_states, return_dict, rngs=rngs, ) def get_text_features( self, input_ids, attention_mask=None, position_ids=None, token_type_ids=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False, ): r""" Args: input_ids (:obj:`numpy.ndarray` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.PreTrainedTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ Returns: text_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of text model. 
""" if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, input_ids, attention_mask, position_ids, token_type_ids, deterministic): text_outputs = module.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, deterministic=deterministic, ) pooled_output = text_outputs[1] text_features = module.text_projection(pooled_output) return text_features return self.module.apply( {"params": params or self.params}, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), jnp.array(position_ids, dtype="i4"), jnp.array(token_type_ids, dtype="i4"), not train, method=_get_features, rngs=rngs, ) def get_image_features( self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False ): r""" Args: pixel_values (:obj:`numpy.ndarray` of shape :obj:`(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using :class:`~transformers.ImageFeatureExtractionMixin`. See :meth:`transformers.ImageFeatureExtractionMixin.__call__` for details. Returns: image_features (:obj:`jnp.ndarray` of shape :obj:`(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of vision model. """ # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng def _get_features(module, pixel_values, deterministic): vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic) pooled_output = vision_outputs[1] # pooled_output image_features = module.visual_projection(pooled_output) return image_features return self.module.apply( {"params": params or self.params}, jnp.array(pixel_values, dtype=jnp.float32), not train, method=_get_features, rngs=rngs, ) @classmethod def from_text_vision_pretrained( cls, text_model_name_or_path: str = None, vision_model_name_or_path: str = None, *model_args, **kwargs, ) -> FlaxPreTrainedModel: """ Params: text_model_name_or_path (:obj: `str`, `optional`): Information necessary to initiate the text model. Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. vision_model_name_or_path (:obj: `str`, `optional`, defaults to `None`): Information necessary to initiate the vision model. Can be either: - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co. 
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under a user or organization name, like ``dbmdz/bert-base-german-cased``. - A path to a `directory` containing model weights saved using :func:`~transformers.FlaxPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``. - A path or url to a `PyTorch checkpoint folder` (e.g, ``./pt_model``). In this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the PyTorch checkpoint in a Flax model using the provided conversion scripts and loading the Flax model afterwards. model_args (remaining positional arguments, `optional`): All remaning positional arguments will be passed to the underlying model's ``__init__`` method. kwargs (remaining dictionary of keyword arguments, `optional`): Can be used to update the configuration object (after it being loaded) and initiate the model (e.g., :obj:`output_attentions=True`). - To update the text configuration, use the prefix `text_` for each configuration parameter. - To update the vision configuration, use the prefix `vision_` for each configuration parameter. - To update the parent model configuration, do not use a prefix for each configuration parameter. Behaves differently depending on whether a :obj:`config` is provided or automatically loaded. Example:: >>> from transformers import FlaxHybridCLIP >>> # initialize a model from pretrained BERT and CLIP models. Note that the projection layers will be randomly initialized. >>> # If using CLIP's vision model the vision projection layer will be initialized using pre-trained weights >>> model = FlaxHybridCLIP.from_text_vision_pretrained('bert-base-uncased', 'openai/clip-vit-base-patch32') >>> # saving model after fine-tuning >>> model.save_pretrained("./bert-clip") >>> # load fine-tuned model >>> model = FlaxHybridCLIP.from_pretrained("./bert-clip") """ kwargs_text = { argument[len("text_") :]: value for argument, value in kwargs.items() if argument.startswith("text_") } kwargs_vision = { argument[len("vision_") :]: value for argument, value in kwargs.items() if argument.startswith("vision_") } # remove text, vision kwargs from kwargs for key in kwargs_text.keys(): del kwargs["text_" + key] for key in kwargs_vision.keys(): del kwargs["vision_" + key] # Load and initialize the text and vision model text_model = kwargs_text.pop("model", None) if text_model is None: assert ( text_model_name_or_path is not None ), "If `model` is not defined as an argument, a `text_model_name_or_path` has to be defined" from transformers import FlaxAutoModel if "config" not in kwargs_text: from transformers import AutoConfig text_config = AutoConfig.from_pretrained(text_model_name_or_path) kwargs_text["config"] = text_config text_model = FlaxAutoModel.from_pretrained(text_model_name_or_path, *model_args, **kwargs_text) vision_model = kwargs_vision.pop("model", None) if vision_model is None: assert ( vision_model_name_or_path is not None ), "If `model` is not defined as an argument, a `vision_model_name_or_path` has to be defined" from transformers import FlaxAutoModel if "config" not in kwargs_vision: from transformers import AutoConfig vision_config = AutoConfig.from_pretrained(vision_model_name_or_path) kwargs_vision["config"] = vision_config vision_model = FlaxAutoModel.from_pretrained(vision_model_name_or_path, *model_args, **kwargs_vision) # instantiate config with corresponding kwargs dtype = kwargs.pop("dtype", 
jnp.float32) config = HybridCLIPConfig.from_text_vision_configs(text_model.config, vision_model.config, **kwargs) # init model model = cls(config, *model_args, dtype=dtype, **kwargs) if vision_config.model_type == "clip": model.params["vision_model"]["vision_model"] = vision_model.params["vision_model"] model.params["visual_projection"]["kernel"] = vision_model.params["visual_projection"]["kernel"] else: model.params["vision_model"] = vision_model.params model.params["text_model"] = text_model.params return model
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/hybrid_clip/run_hybrid_clip.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training a CLIP like dual encoder models using text and vision encoders in the library. The script can be used to train CLIP like models for languages other than english by using a text encoder pre-trained in the desired language. Currently this script support the following vision and text models: Vision models: ViT(https://huggingface.co/models?filter=vit), CLIP (https://huggingface.co/models?filter=clip) Text models: BERT, ROBERTa (https://huggingface.co/models?filter=fill-mask) """ import json import logging import os import sys import time from dataclasses import dataclass, field from pathlib import Path from typing import Callable, Optional import jax import jax.numpy as jnp import optax import torch from flax import jax_utils from flax.jax_utils import unreplicate from flax.training import train_state from flax.training.common_utils import get_metrics, shard, shard_prng_key from modeling_hybrid_clip import FlaxHybridCLIP from torchvision.datasets import VisionDataset from torchvision.io import ImageReadMode, read_image from torchvision.transforms import CenterCrop, ConvertImageDtype, Normalize, Resize from torchvision.transforms.functional import InterpolationMode from tqdm import tqdm import transformers from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments, is_tensorboard_available, set_seed logger = logging.getLogger(__name__) # Cache the result has_tensorboard = is_tensorboard_available() if has_tensorboard: try: from flax.metrics.tensorboard import SummaryWriter except ImportError as ie: has_tensorboard = False print(f"Unable to display metrics through TensorBoard because some package are not installed: {ie}") else: print( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ text_model_name_or_path: str = field( metadata={ "help": ( "The text model checkpoint for weights initialization." "Don't set if you want to train a model from scratch." ) }, ) vision_model_name_or_path: str = field( metadata={ "help": ( "The vision model checkpoint for weights initialization." "Don't set if you want to train a model from scratch." 
) }, ) from_pt: bool = field( default=True, metadata={"help": "whether to load the text and vision model using PyTorch checkpoints."}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ data_dir: Optional[str] = field(default=None, metadata={"help": "The data directory containing input files."}) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file (a jsonlines file)."}, ) max_seq_length: Optional[int] = field( default=72, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) def __post_init__(self): if self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension == "json", "`train_file` should be a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension == "json", "`validation_file` should be a json file." # We use torchvision for faster image pre-processing. 
# We need to ensure faster processing speed as it can become a bottleneck on TPU class Transform(torch.nn.Module): def __init__(self, image_size): super().__init__() self.transforms = torch.nn.Sequential( Resize([image_size], interpolation=InterpolationMode.BICUBIC), CenterCrop(image_size), ConvertImageDtype(torch.float), Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)), ) def forward(self, x: torch.Tensor) -> torch.Tensor: with torch.no_grad(): x = self.transforms(x) return x class ImageTextDataset(VisionDataset): """ Dtaset for loading image-text data for tasks like CLIP training, Image Captioning. Args: root: (string): The root path where the dataset is stored file_path: (string): Path to the file containing the image_paths and associated captions. The expected format is jsonlines where each line is a json object containing to keys. `image_path`: The path to the image. `captions`: An `array` of captions. transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.ToTensor`` target_transform (callable, optional): A function/transform that takes in the target and transforms it. transforms (callable, optional): A function/transform that takes input sample and its target as entry and returns a transformed version. """ def __init__( self, root: str, file_path: str, captions_per_image=2, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None, transforms: Optional[Callable] = None, ): super().__init__(root, transforms, transform, target_transform) with open(file_path, "r") as f: examples = [json.loads(line) for line in f.readlines()] self.captions = [] self.image_paths = [] for example in examples: captions_subset = example["captions"][:captions_per_image] self.captions.extend(captions_subset) self.image_paths.extend([example["image_path"]] * len(captions_subset)) def _load_image(self, idx: int): path = self.image_paths[idx] return read_image(path, mode=ImageReadMode.RGB) def _load_target(self, idx): return self.captions[idx] def __getitem__(self, index: int): image = self._load_image(index) target = self._load_target(index) if self.transforms is not None: image, target = self.transforms(image, target) return image, target def __len__(self) -> int: return len(self.captions) class TrainState(train_state.TrainState): dropout_rng: jnp.ndarray def replicate(self): return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng)) def write_metric(summary_writer, train_metrics, eval_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = 
optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: transformers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.text_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.text_model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." 
) model = FlaxHybridCLIP.from_text_vision_pretrained( model_args.text_model_name_or_path, model_args.vision_model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype), text_from_pt=model_args.from_pt, vision_from_pt=model_args.from_pt, ) config = model.config # set seed for torch dataloaders set_seed(training_args.seed) # Initialize torchvision transforms and jit them for faster processing preprocess = Transform(config.vision_config.image_size) preprocess = torch.jit.script(preprocess) # Initialize the image-text dataset train_dataset = ImageTextDataset( data_args.data_dir, data_args.train_file, captions_per_image=2, transform=preprocess, ) eval_dataset = ImageTextDataset( data_args.data_dir, data_args.validation_file, captions_per_image=1, transform=preprocess, ) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() steps_per_epoch = len(train_dataset) // train_batch_size total_train_steps = steps_per_epoch * num_epochs # Use collate function to tokenizer the text and convert the processed images to numpy def collate_fn(examples): pixel_values = torch.stack([example[0] for example in examples]).permute(0, 2, 3, 1).numpy() captions = [example[1] for example in examples] inputs = tokenizer( captions, max_length=data_args.max_seq_length, padding="max_length", truncation=True, return_tensors="np" ) batch = { "pixel_values": pixel_values, "input_ids": inputs["input_ids"], "attention_mask": inputs["attention_mask"], } return batch # Create data loaders train_loader = torch.utils.data.DataLoader( train_dataset, batch_size=train_batch_size, shuffle=True, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=True, collate_fn=collate_fn, ) eval_loader = torch.utils.data.DataLoader( eval_dataset, batch_size=eval_batch_size, shuffle=False, num_workers=data_args.preprocessing_num_workers, persistent_workers=True, drop_last=True, collate_fn=collate_fn, ) # Enable tensorboard only on the master node if has_tensorboard and jax.process_index() == 0: summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir).joinpath("logs").as_posix()) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, ) # Setup train state state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng) def cross_entropy(logits, axis): logprobs = jax.nn.log_softmax(logits, axis=axis) nll = jnp.diag(logprobs) ce = -jnp.mean(nll) return ce def clip_loss(similarity): loss = (cross_entropy(similarity, axis=0) + cross_entropy(similarity, axis=1)) / 2 return loss # Define gradient update step fn def train_step(state, batch): dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng) def compute_loss(params): logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = clip_loss(logits) return loss grad_fn = 
jax.value_and_grad(compute_loss) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)} metrics = jax.lax.pmean(metrics, axis_name="batch") return new_state, metrics # Define eval fn def eval_step(params, batch): logits = model(**batch, params=params, train=False)[0] loss = clip_loss(logits) # summarize metrics metrics = {"loss": loss} metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics # Create parallel version of the train and eval step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) p_eval_step = jax.pmap(eval_step, "batch") # Replicate the train state on each device state = state.replicate() logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 # Create sampling rng rng, input_rng = jax.random.split(rng) epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) train_metrics = [] steps_per_epoch = len(train_dataset) // train_batch_size train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False) # train for batch in train_loader: batch = shard(batch) state, train_metric = p_train_step(state, batch) train_metrics.append(train_metric) train_step_progress_bar.update(1) train_time += time.time() - train_start train_metric = unreplicate(train_metric) train_step_progress_bar.close() epochs.write( f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) # ======================== Evaluating ============================== eval_metrics = [] eval_steps = len(eval_dataset) // eval_batch_size eval_step_progress_bar = tqdm(total=eval_steps, desc="Evaluating...", position=2, leave=False) for batch in eval_loader: # Model forward batch = shard(batch) metrics = p_eval_step(state.params, batch) eval_metrics.append(metrics) eval_step_progress_bar.update(1) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Print metrics and update progress bar eval_step_progress_bar.close() desc = f"Epoch... ({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']})" epochs.write(desc) epochs.desc = desc # Save metrics if has_tensorboard and jax.process_index() == 0: cur_step = epoch * (len(train_dataset) // train_batch_size) write_metric(summary_writer, train_metrics, eval_metrics, train_time, cur_step) # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(unreplicate(state.params)) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of epoch {epoch+1}", ) if __name__ == "__main__": main()
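

# Note: `ImageTextDataset` above expects `--train_file`/`--validation_file` to be jsonlines files
# with one JSON object per line. The line below is purely illustrative (the file name and captions
# are made-up examples, not taken from any real dataset):
#
#   {"image_path": "images/0001.jpg", "captions": ["A dog running on a beach.", "A brown dog near the sea."]}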
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/model_parallel/run_clm_mp.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Pre-training/Fine-tuning the GPTNeo model for causal language modeling on a text file or a dataset using model parallelism. """ import logging import math import os import sys import time from dataclasses import dataclass, field from itertools import chain from pathlib import Path from typing import Callable, Optional import datasets import jax import jax.numpy as jnp import numpy as np import optax from datasets import Dataset, load_dataset from flax.core.frozen_dict import freeze, unfreeze from flax.training.common_utils import onehot, stack_forest from jax.experimental.maps import mesh from jax.experimental.pjit import pjit from partitions import set_partitions from tqdm import tqdm import transformers from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForCausalLM, HfArgumentParser, TrainingArguments, is_tensorboard_available, ) from transformers.testing_utils import CaptureLogger logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_CAUSAL_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization.Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) block_size: Optional[int] = field( default=None, metadata={ "help": ( "Optional input sequence length after tokenization. " "The training dataset will be truncated in block of this size for training. " "Default to the model max input length for single sentence inputs (take into account special tokens)." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False): """ Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. Shuffle batches if `shuffle` is `True`. """ steps_per_epoch = len(dataset) // batch_size if shuffle: batch_idx = jax.random.permutation(rng, len(dataset)) else: batch_idx = jnp.arange(len(dataset)) batch_idx = batch_idx[: steps_per_epoch * batch_size] # Skip incomplete batch. 
batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) for idx in batch_idx: batch = dataset[idx] batch = {k: jnp.array(v) for k, v in batch.items()} yield batch def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = stack_forest(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def create_learning_rate_fn( train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float ) -> Callable[[int], jnp.array]: """Returns a linear warmup, linear_decay learning rate function.""" steps_per_epoch = train_ds_size // train_batch_size num_train_steps = steps_per_epoch * num_train_epochs warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps) decay_fn = optax.linear_schedule( init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps ) schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps]) return schedule_fn def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty." "Use --overwrite_output_dir to overcome." ) # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) if jax.process_index() == 0: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). 
if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, keep_in_memory=False ) if "validation" not in dataset.keys(): dataset["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) dataset["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.train_file.split(".")[-1] if extension == "txt": extension = "text" dataset = load_dataset(extension, data_files=data_files, cache_dir=model_args.cache_dir) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained config and tokenizer if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script." "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if training_args.do_train: column_names = dataset["train"].column_names else: column_names = dataset["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] # since this will be pickled to avoid _LazyModule error in Hasher force logger loading before tokenize_function tok_logger = transformers.utils.logging.get_logger("transformers.tokenization_utils_base") def tokenize_function(examples): with CaptureLogger(tok_logger) as cl: output = tokenizer(examples[text_column_name]) # clm input could be much much longer than block_size if "Token indices sequence length is longer than the" in cl.out: tok_logger.warning( "^^^^^^^^^^^^^^^^ Please ignore the warning above - this long input will be chunked into smaller bits" " before being passed to the model." ) return output tokenized_datasets = dataset.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, ) if data_args.block_size is None: block_size = tokenizer.model_max_length if block_size > config.max_position_embeddings: logger.warning( f"The tokenizer picked seems to have a very large `model_max_length` ({tokenizer.model_max_length}). " "Picking 1024 instead. You can change that default value by passing --block_size xxx." 
) block_size = 1024 else: if data_args.block_size > tokenizer.model_max_length: logger.warning( f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model" f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}." ) block_size = min(data_args.block_size, tokenizer.model_max_length) # Main data processing function that will concatenate all texts from our dataset and generate chunks of block_size. def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: list(chain(*examples[k])) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. if total_length >= block_size: total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result # Note that with `batched=True`, this map processes 1,000 texts together, so group_texts throws away a remainder # for each of those groups of 1,000 texts. You can adjust that batch_size here but a higher value might be slower # to preprocess. # # To speed up this part, we use multiprocessing. See the documentation of the map method for more information: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.map lm_datasets = tokenized_datasets.map( group_texts, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_train: if "train" not in tokenized_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = lm_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) if training_args.do_eval: if "validation" not in tokenized_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = lm_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." 
) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) rng, dropout_rng = jax.random.split(rng) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() steps_per_epoch = len(train_dataset) // train_batch_size total_train_steps = steps_per_epoch * num_epochs # TODO: weights should be initialized in pjitted fun, this won't work for REALLY large models # TODO: when loading from pre-trained model we need to make sure the vocab is divisible by num_partitions # GPT2's vocab is odd, we need to resize it for fine-tuning model = FlaxAutoModelForCausalLM.from_pretrained( model_args.model_name_or_path, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) # Create learning rate schedule linear_decay_lr_schedule_fn = create_learning_rate_fn( len(train_dataset), train_batch_size, training_args.num_train_epochs, training_args.warmup_steps, training_args.learning_rate, ) optimizer = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, ) def get_initial_state(params): state = optimizer.init(params) return tuple(state), params # Get PartitionSpec for model params param_spec = set_partitions(unfreeze(model.params)) # Get the PyTree for opt_state, we don't actually initialize the opt_state yet. params_shapes = jax.tree_util.tree_map(lambda x: x.shape, model.params) state_shapes = jax.eval_shape(get_initial_state, params_shapes) # get PartitionSpec for opt_state, this is very specific to adamw # TODO: optax returns different state for different optimizers, how can we handle this generically ? 
# or maybe we don't since in our examples we just use adamw or adafactor def get_opt_spec(x): if isinstance(x, dict): return param_spec return None opt_state_spec, param_spec = jax.tree_util.tree_map( get_opt_spec, state_shapes, is_leaf=lambda x: isinstance(x, (dict, optax.EmptyState)) ) # pjit the get_initial_state function to shard params and init # optimizer state in sharded way p_get_initial_state = pjit( get_initial_state, in_axis_resources=None, out_axis_resources=(opt_state_spec, param_spec), ) # hack: move the inital params to CPU to free up device memory # TODO: allow loading weights on CPU in pre-trained model model.params = jax.tree_util.tree_map(lambda x: np.asarray(x), model.params) # mesh defination mesh_devices = np.array(jax.devices()).reshape(1, jax.local_device_count()) # actually initialize the opt_state with mesh(mesh_devices, ("dp", "mp")): opt_state, params = p_get_initial_state(freeze(model.params)) # cross-entropy with z loss def loss_fn(logits, labels, z_loss=0): shift_logits = logits[..., :-1, :] shift_labels = labels[..., 1:] shift_labels = onehot(shift_labels, shift_logits.shape[-1]) shift_logits = shift_logits - jax.lax.stop_gradient(shift_logits.max(axis=-1, keepdims=True)) log_z = jnp.log(jnp.sum(jnp.exp(shift_logits), axis=-1, keepdims=True)) log_softmax = shift_logits - log_z loss = -jnp.sum(shift_labels * log_softmax, axis=-1) loss += (1e-4 * jnp.square(log_z.squeeze(-1))) * z_loss return loss.mean() # Define gradient update step fn # TODO: try to use TrainState instead of passing params and opt_state individually def train_step(params, opt_state, dropout_rng, batch, step): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def compute_loss(params): labels = batch.pop("labels") logits = model(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] loss = loss_fn(logits, labels, z_loss=1.0) return loss grad_fn = jax.value_and_grad(compute_loss) loss, grads = grad_fn(params) updates, new_opt_state = optimizer.update(grads, opt_state, params) new_params = optax.apply_updates(params, updates) metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(step)} return new_params, tuple(new_opt_state), new_dropout_rng, metrics, step + 1 # Define eval fn def eval_step(input_ids, labels, params): logits = model(input_ids=input_ids, params=params, train=False)[0] loss = loss_fn(logits, labels) # metrics return {"loss": loss} p_train_step = pjit( train_step, in_axis_resources=(param_spec, opt_state_spec, None, None, None), out_axis_resources=(param_spec, opt_state_spec, None, None, None), donate_argnums=(0, 1), ) p_eval_step = pjit( eval_step, in_axis_resources=(None, None, param_spec), out_axis_resources=None, ) logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {num_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}") logger.info(f" Total optimization steps = {total_train_steps}") train_time = 0 train_metrics = [] epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) global_step = 0 # we are not doing 2D parallelism (yet!), this just does model parallelism with mesh(mesh_devices, ("dp", "mp")): for _ in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) # Generate an epoch by shuffling sampling indices from the train dataset train_metrics = [] train_loader = data_loader(input_rng, train_dataset, train_batch_size, shuffle=True) steps_per_epoch = len(train_dataset) // train_batch_size # train for _ in tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False): batch = next(train_loader) params, opt_state, dropout_rng, train_metric, global_step = p_train_step( params, opt_state, dropout_rng, batch, global_step, ) train_metrics.append(train_metric) cur_step = global_step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step} | Loss: {train_metric['loss']}, Learning Rate:" f" {train_metric['learning_rate']})" ) train_metrics = [] if cur_step % training_args.eval_steps == 0 and cur_step > 0: # ======================== Evaluating ============================== eval_metrics = [] eval_loader = data_loader(input_rng, eval_dataset, eval_batch_size) eval_steps = len(eval_dataset) // eval_batch_size for _ in tqdm(range(eval_steps), desc="Evaluating...", position=2, leave=False): batch = next(eval_loader) metrics = p_eval_step(batch["input_ids"], batch["labels"], params) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = stack_forest(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) try: eval_metrics["perplexity"] = math.exp(eval_metrics["loss"]) except OverflowError: eval_metrics["perplexity"] = float("inf") logger.info( f"Step... ({cur_step} | Eval loss: {eval_metrics['loss']} | Eval Perplexity:" f" {eval_metrics['perplexity']}" ) if cur_step % training_args.save_steps == 0 and cur_step > 0: # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(params) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of step {cur_step}", ) if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/model_parallel/README.md
<!---
Copyright 2021 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Model parallel language model training example

The following example showcases how to train/fine-tune a GPTNeo model with model parallelism using
the JAX/Flax backend and the [`pjit`](https://jax.readthedocs.io/en/latest/jax.experimental.pjit.html) transformation.

> Note: The example is experimental and might have bugs. Also, it currently only supports a single TPU v3-8.

The `partitions.py` file defines the `PyTree` of `PartitionSpec` for the GPTNeo model, which describes how the model will be sharded.
The actual sharding is automatically handled by `pjit`. The weights are sharded across all local devices.
To adapt the script for other models, we also need to change the `PartitionSpec` accordingly (a short excerpt of what these rules look like is shown at the end of this README).

TODO: Add more explanation.

Before training, let's prepare our model first. To be able to shard the model, the sharded dimension needs to be a multiple of the number of devices it'll be sharded on. But GPTNeo's vocab size is 50257, so we need to resize the embeddings accordingly.

```python
import jax.numpy as jnp
from transformers import FlaxGPTNeoForCausalLM, GPTNeoConfig

model = FlaxGPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-1.3B")

emb = jnp.zeros((50264, model.config.hidden_size))
# update the first 50257 weights using pre-trained weights
emb = emb.at[:50257, :].set(model.params["transformer"]["wte"]["embedding"])
params = model.params
params["transformer"]["wte"]["embedding"] = emb

# initialize a random model with the right vocab_size
config = GPTNeoConfig.from_pretrained("EleutherAI/gpt-neo-1.3B", vocab_size=50264)
model = FlaxGPTNeoForCausalLM(config)

# assign the pre-trained weights and save the model.
model.params = params
model.save_pretrained("gpt-neo-1.3B")
```

### Train Model

```bash
python run_clm_mp.py \
    --model_name_or_path gpt-neo-1.3B \
    --tokenizer_name gpt2 \
    --dataset_name wikitext --dataset_config_name wikitext-2-raw-v1 \
    --do_train --do_eval \
    --block_size 1024 \
    --num_train_epochs 5 \
    --learning_rate 4e-6 \
    --per_device_train_batch_size 3 --per_device_eval_batch_size 3 \
    --overwrite_output_dir --output_dir ~/tmp/flax-clm \
    --cache_dir ~/datasets_cache/wikitext --dtype bfloat16 \
    --logging_steps 96 --eval_steps 96
```
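
### Adapting the partition rules

The rules that produce the `PartitionSpec` PyTree live in `partitions.py` (`_get_partition_rules`). Roughly, each rule maps a (regex) path through the parameter tree to a `PartitionSpec`, where `"mp"` is the model-parallel mesh axis and `None` means the parameter is replicated. The snippet below is only an illustrative excerpt of the existing GPTNeo rules, not a complete spec for a new model:

```python
from jax.experimental import PartitionSpec as P

# A few of the GPTNeo rules, for illustration: shard the vocab and
# feed-forward dimensions across the "mp" axis, replicate layer norms.
example_rules = [
    (("transformer", "wte", "embedding"), P("mp", None)),                # token embeddings
    (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),  # attention projections
    (("mlp", "c_fc", "kernel"), P(None, "mp")),                          # feed-forward up-projection
    ((r"ln_\d+", "bias"), None),                                         # layer norms are replicated
]
```

To support another architecture, you would write an analogous list of rules matching that model's parameter names and pass the resulting spec to `pjit`, as done in `run_clm_mp.py`.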
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/model_parallel/partitions.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The Google Research Authors and The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities for constructing PyTrees of PartitionSpecs.""" # utils adapted from https://github.com/google-research/google-research/blob/master/flax_models/t5x/partitions.py import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _unmatched = object() # For specifying empty leaf dict `{}` empty_dict = object() def _match(qs, ks): """Return True if regexes in qs match any window of strings in tuple ks.""" # compile regexes and force complete match qts = tuple((re.compile(x + "$") for x in qs)) for i in range(len(ks) - len(qs) + 1): matches = [x.match(y) for x, y in zip(qts, ks[i:])] if matches and all(matches): return True return False def _replacement_rules(rules): def replace(key, val): for rule, replacement in rules: if _match(rule, key): return replacement return val return replace # PartitionSpec for GPTNeo # replicate the hidden dim and shard feed-forward and head dim def _get_partition_rules(): return [ # embeddings (("transformer", "wpe", "embedding"), P("mp", None)), (("transformer", "wte", "embedding"), P("mp", None)), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")), (("attention", "out_proj", "kernel"), P("mp", None)), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(None, "mp")), (("mlp", "c_fc", "bias"), P("mp")), (("mlp", "c_proj", "kernel"), P("mp", None)), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def set_partitions(in_dict): rules = _get_partition_rules() replace = _replacement_rules(rules) initd = {k: _unmatched for k in flatten_dict(in_dict)} result = {k: replace(k, v) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(result))
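

# Example usage (illustrative only; this mirrors how `run_clm_mp.py` consumes the spec):
#
#   param_spec = set_partitions(unfreeze(model.params))
#   p_get_initial_state = pjit(
#       get_initial_state,
#       in_axis_resources=None,
#       out_axis_resources=(opt_state_spec, param_spec),
#   )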
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/wav2vec2/README.md
# Wav2Vec2 Contrastive Loss PreTraining examples

The following example showcases how to pretrain a wav2vec2 model using the JAX/Flax backend.
Pretraining Wav2Vec2 is rather complex, so it is highly recommended to read the
[official paper](https://arxiv.org/abs/2006.11477).

JAX/Flax allows you to trace pure functions and compile them into efficient, fused accelerator code on both GPU and TPU.
Models written in JAX/Flax are **immutable** and updated in a purely functional
way which enables simple and efficient model parallelism.

`run_wav2vec2_pretrain_flax.py` is a lightweight example of how to download and preprocess a dataset from the 🤗 Datasets library or use your own files (jsonlines or csv), then pretrain the Wav2Vec2 model on it.

For custom datasets in `jsonlines` format please see [the Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.html#json-files); you will also find examples of these below.

Let's start by creating a model repository to save the trained model and logs.
Here we call the model `"wav2vec2-base-robust"`, but you can change the model name as you like.

You can do this either directly on [huggingface.co](https://huggingface.co/new) (assuming that
you are logged in) or via the command line:

```
huggingface-cli repo create wav2vec2-base-robust
```

Next we clone the model repository to add the tokenizer and model files.

```
git clone https://huggingface.co/<your-username>/wav2vec2-base-robust
```

To ensure that all tensorboard traces will be uploaded correctly, we need to
track them. You can run the following command inside your model repo to do so.

```
cd wav2vec2-base-robust
git lfs track "*tfevents*"
```

Great, we have set up our model repository.
During training, we will automatically push the training logs and model weights to the repo.

Next, let's add a symbolic link to the `run_wav2vec2_pretrain_flax.py` script.

```bash
export MODEL_DIR="./wav2vec2-base-robust"
ln -s ~/transformers/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py ./
```

### Create the model configuration

Let's first create the model configuration and store it in the model repository.
Note that many training parameters can be set in the model configuration, including
the configuration of the masking distribution (`mask_time_length`, `mask_time_prob`),
dropout (`attention_dropout`, ...), the trade-off between the contrastive loss and
the diversity loss, etc.
Most likely you will need to change these parameters depending on your use case.
Again, we highly recommend reading the [official paper](https://arxiv.org/abs/2006.11477)
to better understand which parameters can be set for pretraining.

For this example, we will be using a `"base"`-sized model of Wav2Vec2 with robust
layer norm and keep most of the default settings.

```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2Config
config = Wav2Vec2Config.from_pretrained(
    "facebook/wav2vec2-base",
    mask_time_length=10,
    mask_time_prob=0.05,
    diversity_loss_weight=0.1,
    num_negatives=100,
    do_stable_layer_norm=True,
    feat_extract_norm="layer",
)
config.save_pretrained(model_dir)
```

### Create a feature extractor configuration

Before we can start the training, we need to define
a feature extractor that takes care of normalization, etc.

Here we can also re-use the feature extractor of [wav2vec2-base](https://huggingface.co/facebook/wav2vec2-base) while making sure that padding is allowed.
```python
model_dir="./wav2vec2-base-robust"

from transformers import Wav2Vec2FeatureExtractor
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base", return_attention_mask=True)
feature_extractor.save_pretrained(model_dir)
```

### Train the model

Finally, we can run the example script to train the model:

```bash
./run_wav2vec2_pretrain_flax.py \
    --output_dir=${MODEL_DIR} \
    --num_train_epochs="5" \
    --per_device_train_batch_size="32" \
    --per_device_eval_batch_size="32" \
    --learning_rate="5e-4" \
    --weight_decay="0.01" \
    --warmup_steps="2000" \
    --model_name_or_path=${MODEL_DIR} \
    --dataset_name="librispeech_asr" \
    --dataset_config_name="clean" \
    --train_split_name="train.100" \
    --preprocessing_num_workers="4" \
    --max_duration_in_seconds="10.0" \
    --adam_beta1="0.9" \
    --adam_beta2="0.98" \
    --pad_to_multiple_of="16384" \
    --push_to_hub
```

Note that this script is not fully tested yet, so we cannot guarantee that
the command above leads to satisfactory results.
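
### Sanity-check the saved artifacts

Before launching a long run, it can be useful to verify that the configuration and feature extractor saved above load back with the settings the pretraining script requires (it only supports the stable layer-norm architecture with `feat_extract_norm="layer"`). The snippet below is a minimal check, assuming the model directory created in the previous steps:

```python
from transformers import Wav2Vec2Config, Wav2Vec2FeatureExtractor

model_dir = "./wav2vec2-base-robust"

config = Wav2Vec2Config.from_pretrained(model_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(model_dir)

# run_wav2vec2_pretrain_flax.py raises an error unless both of these hold
assert config.do_stable_layer_norm and config.feat_extract_norm == "layer"
# an attention mask is needed so that padded frames are ignored
assert feature_extractor.return_attention_mask
```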
0
hf_public_repos/transformers/examples/research_projects/jax-projects
hf_public_repos/transformers/examples/research_projects/jax-projects/wav2vec2/run_wav2vec2_pretrain_flax.py
#!/usr/bin/env python3 import logging import sys import time from dataclasses import field from pathlib import Path from typing import Dict, List, Optional, Union import flax import jax import jax.numpy as jnp import librosa import numpy as np import optax from datasets import DatasetDict, load_dataset from flax import jax_utils, traverse_util from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from tqdm import tqdm from transformers import ( FlaxWav2Vec2ForPreTraining, HfArgumentParser, TrainingArguments, Wav2Vec2Config, Wav2Vec2FeatureExtractor, is_tensorboard_available, ) from transformers.models.wav2vec2.modeling_flax_wav2vec2 import _compute_mask_indices, _sample_negative_indices logger = logging.getLogger(__name__) @flax.struct.dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) freeze_feature_extractor: Optional[bool] = field( default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) verbose_logging: Optional[bool] = field( default=False, metadata={"help": "Whether to log verbose messages or not."}, ) max_gumbel_temperature: Optional[float] = field( default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."} ) min_gumbel_temperature: Optional[float] = field( default=0.1, metadata={"help": "Minimum temperature for gumbel softmax."} ) gumbel_temperature_decay: Optional[float] = field( default=0.999995, metadata={"help": "Decay of gumbel temperature during training."} ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @flax.struct.dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_split_name: Optional[str] = field( default="train", metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" }, ) validation_split_name: Optional[str] = field( default="validation", metadata={ "help": ( "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'" ) }, ) speech_file_column: Optional[str] = field( default="file", metadata={"help": "Column in the dataset that contains speech file path. 
Defaults to 'file'"}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_duration_in_seconds: Optional[float] = field( default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} ) pad_to_multiple_of: Optional[int] = field( default=1024, metadata={ "help": ( "If set will pad the sequence to a multiple of the provided value. This is important to avoid" " triggering recompilations on TPU" ) }, ) @flax.struct.dataclass class FlaxDataCollatorForWav2Vec2Pretraining: """ Data collator that will dynamically pad the inputs received and prepare masked indices for self-supervised pretraining. Args: model (:class:`~transformers.FlaxWav2Vec2ForPreTraining`): The Wav2Vec2 model used for pretraining. The data collator needs to have access to config and ``_get_feat_extract_output_lengths`` function for correct padding. feature_extractor (:class:`~transformers.Wav2Vec2FeatureExtractor`): The processor used for proccessing the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the ``input_values`` of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). 
""" model: FlaxWav2Vec2ForPreTraining feature_extractor: Wav2Vec2FeatureExtractor padding: Union[bool, str] = "longest" pad_to_multiple_of: Optional[int] = None max_length: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], np.ndarray]]]) -> Dict[str, np.ndarray]: # reformat list to dict and set to pytorch format batch = self.feature_extractor.pad( features, max_length=self.max_length, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="np", ) mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1]) batch_size = batch["input_values"].shape[0] attention_mask = None if batch["attention_mask"] is not None: output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)) attention_mask = np.zeros((batch_size, mask_indices_seq_length), dtype=np.int8) # these two operations makes sure that all values # before the output lengths indices are attended to attention_mask[(np.arange(attention_mask.shape[0]), output_lengths - 1)] = 1 attention_mask = jnp.flip(jnp.flip(attention_mask, -1).cumsum(-1), -1).astype("bool") # sample randomly masked indices batch["mask_time_indices"] = _compute_mask_indices( (batch_size, mask_indices_seq_length), self.model.config.mask_time_prob, self.model.config.mask_time_length, attention_mask=attention_mask, min_masks=2, ) # sample indices to take for negative vectors batch["sampled_negative_indices"] = _sample_negative_indices( (batch["mask_time_indices"].shape + (self.model.config.proj_codevector_dim,)), self.model.config.num_negatives, attention_mask=attention_mask, ) return batch def configure_logger(model_args: ModelArguments, training_args: TrainingArguments): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logging_level = logging.WARNING if model_args.verbose_logging: logging_level = logging.DEBUG logger.setLevel(logging_level) def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: num_samples = len(samples_idx) samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx def compute_contrastive_loss( quantized_features, transformer_features, negative_indices, mask_time_indices, logits_temp, num_negatives ): batch_size, sequence_length, hidden_size = quantized_features.shape # take negative vectors from sampled indices quantized_negatives = quantized_features.reshape(-1, hidden_size)[negative_indices.reshape(-1)] quantized_negatives = quantized_negatives.reshape( batch_size, sequence_length, num_negatives, hidden_size ).transpose(2, 0, 1, 3) target_features = jnp.concatenate([quantized_features[None, :], quantized_negatives], axis=0) loss_logits = optax.cosine_similarity(transformer_features, target_features) loss_logits = loss_logits / logits_temp 
neg_is_pos = (quantized_features == quantized_negatives).all(-1) neg_is_pos = jnp.concatenate([jnp.full((1,) + loss_logits.shape[1:], False), neg_is_pos], axis=0) # make sure incorrectly sampled vectors don't contribute to loss loss_logits = jnp.where(neg_is_pos, -1e9, loss_logits) predictions = loss_logits.transpose(2, 1, 0).reshape(-1, loss_logits.shape[0]) targets = ((1 - mask_time_indices) * -100).transpose(1, 0).flatten() target_mask = jnp.where(targets >= 0, 1.0, 0.0) contrastive_loss = optax.softmax_cross_entropy(predictions, onehot(targets, predictions.shape[-1])) * target_mask contrastive_loss = contrastive_loss.sum() return contrastive_loss def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) model_args, data_args, training_args = parser.parse_args_into_dataclasses() configure_logger(model_args, training_args) # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]", cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" datasets = DatasetDict() datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir, ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"{data_args.train_split_name}", cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True ) def prepare_dataset(batch): # check that all files have the correct sampling rate batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate) return batch # load audio files into numpy arrays vectorized_datasets = datasets.map( prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names ) # filter audio files that are too long vectorized_datasets = vectorized_datasets.filter( lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate) ) def normalize(batch): return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate) # normalize and transform to `BatchFeatures` vectorized_datasets = vectorized_datasets.map( normalize, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets["train"].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 config = Wav2Vec2Config.from_pretrained( model_args.model_name_or_path, 
cache_dir=model_args.cache_dir, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and" " ``config.feat_extract_norm='layer'" ) model = FlaxWav2Vec2ForPreTraining(config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype)) # Activate gradient checkpointing if needed if training_args.gradient_checkpointing: model.gradient_checkpointing_enable() data_collator = FlaxDataCollatorForWav2Vec2Pretraining( model=model, feature_extractor=feature_extractor, pad_to_multiple_of=data_args.pad_to_multiple_of ) # Enable tensorboard only on the master node has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) else: logger.warning( "Unable to display metrics through TensorBoard because the package is not installed: " "Please run pip install tensorboard to enable." ) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) gumbel_rngs = jax.random.split(rng, jax.local_device_count()) num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() num_train_steps = len(vectorized_datasets["train"]) // train_batch_size * num_epochs # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. 
def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) flat_mask = { path: (path[-1] != "bias" and path[-2:] not in [("layer_norm", "scale"), ("final_layer_norm", "scale")]) for path in flat_params } return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state and define training hyper-parameters state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) num_negatives = model.config.num_negatives contrastive_logits_temperature = model.config.contrastive_logits_temperature num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups diversity_loss_weight = model.config.diversity_loss_weight # Define gradient update step fn def train_step(state, batch, dropout_rng, gumbel_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) gumbel_rng, new_gumbel_rng = jax.random.split(gumbel_rng) def loss_fn(params): negative_indices = batch.pop("sampled_negative_indices") gumbel_temperature = jnp.clip( model_args.max_gumbel_temperature * model_args.gumbel_temperature_decay**state.step, a_min=model_args.min_gumbel_temperature, ) outputs = state.apply_fn( **batch, gumbel_temperature=gumbel_temperature, params=params, dropout_rng=dropout_rng, gumbel_rng=gumbel_rng, train=True, ) contrastive_loss = compute_contrastive_loss( outputs.projected_quantized_states, outputs.projected_states, negative_indices, batch["mask_time_indices"], contrastive_logits_temperature, num_negatives, ) diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors loss = contrastive_loss + diversity_loss_weight * diversity_loss return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean( {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" ) return new_state, metrics, new_dropout_rng, new_gumbel_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): negative_indices = batch.pop("sampled_negative_indices") outputs = model(**batch, params=params, train=False) contrastive_loss = compute_contrastive_loss( outputs.projected_quantized_states, outputs.projected_states, negative_indices, batch["mask_time_indices"], contrastive_logits_temperature, num_negatives, ) diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors loss = contrastive_loss + diversity_loss_weight * diversity_loss # summarize metrics metrics = {"loss": loss.mean(), "codevector_perplexity": outputs.codevector_perplexity} metrics = jax.lax.pmean(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 train_metrics = [] epochs = tqdm(range(num_epochs), desc=f"Epoch ... 
(1/{num_epochs})", position=0) for epoch in epochs: # ======================== Training ================================ train_start = time.time() # Create sampling rng rng, input_rng = jax.random.split(rng) # Generate an epoch by shuffling sampling indices from the train dataset num_train_samples = len(vectorized_datasets["train"]) # Avoid using jax.numpy here in case of TPU training train_samples_idx = np.random.permutation(np.arange(num_train_samples)) train_batch_idx = generate_batch_splits(train_samples_idx, train_batch_size) # Gather the indexes for creating the batch and do a training step for step, batch_idx in enumerate(tqdm(train_batch_idx, desc="Training...", position=1)): samples = [vectorized_datasets["train"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples) model_inputs = shard(model_inputs.data) # Model forward state, train_metric, dropout_rngs, gumbel_rngs = p_train_step( state, model_inputs, dropout_rngs, gumbel_rngs ) train_metrics.append(train_metric) cur_step = epoch * (num_train_samples // train_batch_size) + step if cur_step % training_args.logging_steps == 0 and cur_step > 0: # Save metrics train_metric = jax_utils.unreplicate(train_metric) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, cur_step) epochs.write( f"Step... ({cur_step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" f" {train_metric['learning_rate'].mean()})" ) train_metrics = [] # ======================== Evaluating ============================== num_eval_samples = len(vectorized_datasets["validation"]) # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) eval_metrics = [] for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=2)): samples = [vectorized_datasets["validation"][int(idx)] for idx in batch_idx] model_inputs = data_collator(samples) # Model forward model_inputs = shard(model_inputs.data) metrics = p_eval_step(state.params, model_inputs) eval_metrics.append(metrics) # get eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.mean, eval_metrics) # Update progress bar epochs.write( f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {eval_metrics['loss']}, Perplexity:" f" {eval_metrics['codevector_perplexity']})" ) # Save metrics if has_tensorboard and jax.process_index() == 0: cur_step = epoch * (len(vectorized_datasets["train"]) // train_batch_size) write_eval_metric(summary_writer, eval_metrics, cur_step) # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained(training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub) if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/fsner/requirements.txt
transformers>=4.9.2
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/fsner/README.md
<p align="center"> <img src="http://sayef.tech:8082/uploads/FSNER-LOGO-2.png" alt="FSNER LOGO"> </p> <p align="center"> Implemented by <a href="https://huggingface.co/sayef"> sayef </a>. </p> ## Overview The FSNER model was proposed in [Example-Based Named Entity Recognition](https://arxiv.org/abs/2008.10570) by Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it uses a train-free few-shot learning approach inspired by question-answering. ## Abstract ---- > We present a novel approach to named entity recognition (NER) in the presence of scarce data that we call example-based NER. Our train-free few-shot learning approach takes inspiration from question-answering to identify entity spans in a new and unseen domain. In comparison with the current state-of-the-art, the proposed method performs significantly better, especially when using a low number of support examples. ## Model Training Details ----- | identifier | epochs | datasets | | ---------- |:----------:| :-----:| | [sayef/fsner-bert-base-uncased](https://huggingface.co/sayef/fsner-bert-base-uncased) | 10 | ontonotes5, conll2003, wnut2017, and fin (Alvarado et al.). | ## Installation and Example Usage ------ You can use the FSNER model in 3 ways: 1. Install directly from PyPI: `pip install fsner` and import the model as shown in the code example below or 2. Install from source: `python setup.py install` and import the model as shown in the code example below or 3. Clone repo and change directory to `src` and import the model as shown in the code example below ```python from fsner import FSNERModel, FSNERTokenizerUtils model = FSNERModel("sayef/fsner-bert-base-uncased") tokenizer = FSNERTokenizerUtils("sayef/fsner-bert-base-uncased") # size of query and supports must be the same. If you want to find all the entitites in one particular query, just repeat the same query n times where n is equal to the number of supports (or entities). query = [ 'KWE 4000 can reach with a maximum speed from up to 450 P/min an accuracy from 50 mg', 'I would like to order a computer from eBay.', ] # each list in supports are the examples of one entity type # wrap entities around with [E] and [/E] in the examples supports = [ [ 'Horizontal flow wrapper [E] Pack 403 [/E] features the new retrofit-kit „paper-ON-form“', '[E] Paloma Pick-and-Place-Roboter [/E] arranges the bakery products for the downstream tray-forming equipment', 'Finally, the new [E] Kliklok ACE [/E] carton former forms cartons and trays without the use of glue', 'We set up our pilot plant with the right [E] FibreForm® [/E] configuration to make prototypes for your marketing tests and package validation', 'The [E] CAR-T5 [/E] is a reliable, purely mechanically driven cartoning machine for versatile application fields' ], [ "[E] Walmart [/E] is a leading e-commerce company", "I recently ordered a book from [E] Amazon [/E]", "I ordered this from [E] ShopClues [/E]", "[E] Flipkart [/E] started it's journey from zero" ] ] device = 'cpu' W_query = tokenizer.tokenize(query).to(device) W_supports = tokenizer.tokenize(supports).to(device) start_prob, end_prob = model(W_query, W_supports) output = tokenizer.extract_entity_from_scores(query, W_query, start_prob, end_prob, thresh=0.50) print(output) ```
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/fsner/setup.py
import setuptools


with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()

setuptools.setup(
    name="fsner",
    version="0.0.1",
    author="msi sayef",
    author_email="msi.sayef@gmail.com",
    description="Few-shot Named Entity Recognition",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/huggingface/transformers/tree/main/examples/research_projects/fsner",
    project_urls={
        "Bug Tracker": "https://github.com/huggingface/transformers/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "Operating System :: OS Independent",
    ],
    package_dir={"": "src"},
    packages=setuptools.find_packages(where="src"),
    python_requires=">=3.6",
    install_requires=["torch>=1.9.0", "transformers>=4.9.2"],
)
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/fsner/pyproject.toml
[build-system]
requires = [
    "setuptools>=57.4.0",
    "wheel>=0.37.0",
    "transformers>=4.9.2"
]
build-backend = "setuptools.build_meta"
0
hf_public_repos/transformers/examples/research_projects/fsner/src
hf_public_repos/transformers/examples/research_projects/fsner/src/fsner/__init__.py
from .model import FSNERModel
from .tokenizer_utils import FSNERTokenizerUtils


__all__ = ["FSNERModel", "FSNERTokenizerUtils"]
0
hf_public_repos/transformers/examples/research_projects/fsner/src
hf_public_repos/transformers/examples/research_projects/fsner/src/fsner/tokenizer_utils.py
import torch from transformers import AutoTokenizer class FSNERTokenizerUtils(object): def __init__(self, pretrained_model_name_or_path): self.tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path) def tokenize(self, x): """ Wrapper function for tokenizing query and supports Args: x (`List[str] or List[List[str]]`): List of strings for query or list of lists of strings for supports. Returns: `transformers.tokenization_utils_base.BatchEncoding` dict with additional keys and values for start_token_id, end_token_id and sizes of example lists for each entity type """ if isinstance(x, list) and all(isinstance(_x, list) for _x in x): d = None for l in x: t = self.tokenizer( l, padding="max_length", max_length=384, truncation=True, return_tensors="pt", ) t["sizes"] = torch.tensor([len(l)]) if d is not None: for k in d.keys(): d[k] = torch.cat((d[k], t[k]), 0) else: d = t d["start_token_id"] = torch.tensor(self.tokenizer.convert_tokens_to_ids("[E]")) d["end_token_id"] = torch.tensor(self.tokenizer.convert_tokens_to_ids("[/E]")) elif isinstance(x, list) and all(isinstance(_x, str) for _x in x): d = self.tokenizer( x, padding="max_length", max_length=384, truncation=True, return_tensors="pt", ) else: raise Exception( "Type of parameter x was not recognized! Only `list of strings` for query or `list of lists of" " strings` for supports are supported." ) return d def extract_entity_from_scores(self, query, W_query, p_start, p_end, thresh=0.70): """ Extracts entities from query and scores given a threshold. Args: query (`List[str]`): List of query strings. W_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of query sequence tokens in the vocabulary. p_start (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as being start token of an entity p_end (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as being end token of an entity thresh (`float`): Score threshold value Returns: A list of lists of tuples(decoded entity, score) """ final_outputs = [] for idx in range(len(W_query["input_ids"])): start_indexes = end_indexes = range(p_start.shape[1]) output = [] for start_id in start_indexes: for end_id in end_indexes: if start_id < end_id: output.append( ( start_id, end_id, p_start[idx][start_id].item(), p_end[idx][end_id].item(), ) ) output.sort(key=lambda tup: (tup[2] * tup[3]), reverse=True) temp = [] for k in range(len(output)): if output[k][2] * output[k][3] >= thresh: c_start_pos, c_end_pos = output[k][0], output[k][1] decoded = self.tokenizer.decode(W_query["input_ids"][idx][c_start_pos:c_end_pos]) temp.append((decoded, output[k][2] * output[k][3])) final_outputs.append(temp) return final_outputs
0
hf_public_repos/transformers/examples/research_projects/fsner/src
hf_public_repos/transformers/examples/research_projects/fsner/src/fsner/model.py
import torch from transformers import AutoModel class FSNERModel(torch.nn.Module): """ The FSNER model implements a few-shot named entity recognition method from the paper `Example-Based Named Entity Recognition <https://arxiv.org/abs/2008.10570>`__ by Morteza Ziyadi, Yuting Sun, Abhishek Goswami, Jade Huang, Weizhu Chen. To identify entity spans in a new domain, it uses a train-free few-shot learning approach inspired by question-answering. """ def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"): super(FSNERModel, self).__init__() self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True) self.cos = torch.nn.CosineSimilarity(3, 1e-08) self.softmax = torch.nn.Softmax(dim=1) def BERT(self, **inputs): return self.bert(**inputs).last_hidden_state def VectorSum(self, token_embeddings): return token_embeddings.sum(2, keepdim=True) def Atten(self, q_rep, S_rep, T=1): return self.softmax(T * self.cos(q_rep, S_rep)) def forward(self, W_query, W_supports): """ Find scores of each token being start and end token for an entity. Args: W_query (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of query sequence tokens in the vocabulary. W_supports (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of support sequence tokens in the vocabulary. Returns: p_start (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as being start token of an entity p_end (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Scores of each token as being end token of an entity """ support_sizes = W_supports["sizes"].tolist() start_token_id = W_supports["start_token_id"].item() end_token_id = W_supports["end_token_id"].item() del W_supports["sizes"] del W_supports["start_token_id"] del W_supports["end_token_id"] q = self.BERT(**W_query) S = self.BERT(**W_supports) p_starts = None p_ends = None start_token_masks = W_supports["input_ids"] == start_token_id end_token_masks = W_supports["input_ids"] == end_token_id for i, size in enumerate(support_sizes): if i == 0: s = 0 else: s = support_sizes[i - 1] s_start = S[s : s + size][start_token_masks[s : s + size]] s_end = S[s : s + size][end_token_masks[s : s + size]] p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0) p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0) if p_starts is not None: p_starts = torch.vstack((p_starts, p_start)) p_ends = torch.vstack((p_ends, p_end)) else: p_starts = p_start p_ends = p_end return p_starts, p_ends
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/extracting_data.py
import getopt import json import os # import numpy as np import sys from collections import OrderedDict import datasets import numpy as np import torch from modeling_frcnn import GeneralizedRCNN from processing_image import Preprocess from utils import Config """ USAGE: ``python extracting_data.py -i <img_dir> -o <dataset_file>.datasets <batch_size>`` """ TEST = False CONFIG = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") DEFAULT_SCHEMA = datasets.Features( OrderedDict( { "attr_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "attr_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "boxes": datasets.Array2D((CONFIG.MAX_DETECTIONS, 4), dtype="float32"), "img_id": datasets.Value("int32"), "obj_ids": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "obj_probs": datasets.Sequence(length=CONFIG.MAX_DETECTIONS, feature=datasets.Value("float32")), "roi_features": datasets.Array2D((CONFIG.MAX_DETECTIONS, 2048), dtype="float32"), "sizes": datasets.Sequence(length=2, feature=datasets.Value("float32")), "preds_per_image": datasets.Value(dtype="int32"), } ) ) class Extract: def __init__(self, argv=sys.argv[1:]): inputdir = None outputfile = None subset_list = None batch_size = 1 opts, args = getopt.getopt(argv, "i:o:b:s", ["inputdir=", "outfile=", "batch_size=", "subset_list="]) for opt, arg in opts: if opt in ("-i", "--inputdir"): inputdir = arg elif opt in ("-o", "--outfile"): outputfile = arg elif opt in ("-b", "--batch_size"): batch_size = int(arg) elif opt in ("-s", "--subset_list"): subset_list = arg assert inputdir is not None # and os.path.isdir(inputdir), f"{inputdir}" assert outputfile is not None and not os.path.isfile(outputfile), f"{outputfile}" if subset_list is not None: with open(os.path.realpath(subset_list)) as f: self.subset_list = {self._vqa_file_split()[0] for x in tryload(f)} else: self.subset_list = None self.config = CONFIG if torch.cuda.is_available(): self.config.model.device = "cuda" self.inputdir = os.path.realpath(inputdir) self.outputfile = os.path.realpath(outputfile) self.preprocess = Preprocess(self.config) self.model = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.config) self.batch = batch_size if batch_size != 0 else 1 self.schema = DEFAULT_SCHEMA def _vqa_file_split(self, file): img_id = int(file.split(".")[0].split("_")[-1]) filepath = os.path.join(self.inputdir, file) return (img_id, filepath) @property def file_generator(self): batch = [] for i, file in enumerate(os.listdir(self.inputdir)): if self.subset_list is not None and i not in self.subset_list: continue batch.append(self._vqa_file_split(file)) if len(batch) == self.batch: temp = batch batch = [] yield list(map(list, zip(*temp))) for i in range(1): yield list(map(list, zip(*batch))) def __call__(self): # make writer if not TEST: writer = datasets.ArrowWriter(features=self.schema, path=self.outputfile) # do file generator for i, (img_ids, filepaths) in enumerate(self.file_generator): images, sizes, scales_yx = self.preprocess(filepaths) output_dict = self.model( images, sizes, scales_yx=scales_yx, padding="max_detections", max_detections=self.config.MAX_DETECTIONS, pad_value=0, return_tensors="np", location="cpu", ) output_dict["boxes"] = output_dict.pop("normalized_boxes") if not TEST: output_dict["img_id"] = np.array(img_ids) batch = self.schema.encode_batch(output_dict) writer.write_batch(batch) if TEST: break # finalizer the writer if not TEST: 
num_examples, num_bytes = writer.finalize() print(f"Success! You wrote {num_examples} entry(s) and {num_bytes >> 20} mb") def tryload(stream): try: data = json.load(stream) try: data = list(data.keys()) except Exception: data = [d["img_id"] for d in data] except Exception: try: data = eval(stream.read()) except Exception: data = stream.read().split("\n") return data if __name__ == "__main__": extract = Extract(sys.argv[1:]) extract() if not TEST: dataset = datasets.Dataset.from_file(extract.outputfile) # wala! # print(np.array(dataset[0:2]["roi_features"]).shape)
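
# Example invocation (paths are placeholders; see the usage note at the top of this file):
#
#   python extracting_data.py -i ./image_dir -o ./frcnn_features.datasets -b 4
#
# The resulting file can then be re-loaded with `datasets.Dataset.from_file(...)`,
# exactly as done above when TEST is disabled.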
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/requirements.txt
appdirs==1.4.3
argon2-cffi==20.1.0
async-generator==1.10
attrs==20.2.0
backcall==0.2.0
CacheControl==0.12.6
certifi==2023.7.22
cffi==1.14.2
chardet==3.0.4
click==7.1.2
colorama==0.4.3
contextlib2==0.6.0
cycler==0.10.0
datasets==1.0.0
decorator==4.4.2
defusedxml==0.6.0
dill==0.3.2
distlib==0.3.0
distro==1.4.0
entrypoints==0.3
filelock==3.0.12
future==0.18.3
html5lib==1.0.1
idna==2.8
ipaddr==2.2.0
ipykernel==5.3.4
ipython
ipython-genutils==0.2.0
ipywidgets==7.5.1
jedi==0.17.2
Jinja2>=2.11.3
joblib==1.2.0
jsonschema==3.2.0
jupyter==1.0.0
jupyter-client==6.1.7
jupyter-console==6.2.0
jupyter-core==4.6.3
jupyterlab-pygments==0.1.1
kiwisolver==1.2.0
lockfile==0.12.2
MarkupSafe==1.1.1
matplotlib==3.3.1
mistune==2.0.3
msgpack==0.6.2
nbclient==0.5.0
nbconvert==6.5.1
nbformat==5.0.7
nest-asyncio==1.4.0
notebook==6.4.12
numpy==1.22.0
opencv-python==4.4.0.42
packaging==20.3
pandas==1.1.2
pandocfilters==1.4.2
parso==0.7.1
pep517==0.8.2
pexpect==4.8.0
pickleshare==0.7.5
Pillow>=8.1.1
progress==1.5
prometheus-client==0.8.0
prompt-toolkit==3.0.7
ptyprocess==0.6.0
pyaml==20.4.0
pyarrow==1.0.1
pycparser==2.20
Pygments>=2.7.4
pyparsing==2.4.6
pyrsistent==0.16.0
python-dateutil==2.8.1
pytoml==0.1.21
pytz==2020.1
PyYAML>=5.4
pyzmq==19.0.2
qtconsole==4.7.7
QtPy==1.9.0
regex==2020.7.14
requests==2.31.0
retrying==1.3.3
sacremoses==0.0.43
Send2Trash==1.5.0
sentencepiece==0.1.91
six==1.14.0
terminado==0.8.3
testpath==0.4.4
tokenizers==0.8.1rc2
torch==1.6.0
torchvision==0.7.0
tornado==6.3.2
tqdm==4.48.2
traitlets
git+https://github.com/huggingface/transformers.git
urllib3==1.26.5
wcwidth==0.2.5
webencodings==0.5.1
wget==3.2
widgetsnbextension==3.5.1
xxhash==2.0.0
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/README.md
# LXMERT DEMO

1. make a virtualenv: ``virtualenv venv`` and activate it: ``source venv/bin/activate``
2. install the requirements: ``pip install -r ./requirements.txt``
3. usage is shown in ``demo.ipynb`` (a condensed sketch follows below)
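
For orientation, here is a condensed sketch of the VQA path from ``demo.ipynb`` (the image URL and question are the examples used in the notebook; any image URL or path works, and the GQA model is used the same way with ``unc-nlp/lxmert-gqa-uncased``):

```python
from modeling_frcnn import GeneralizedRCNN
from processing_image import Preprocess
from transformers import LxmertForQuestionAnswering, LxmertTokenizer
from utils import Config
import utils

VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json"
vqa_answers = utils.get_data(VQA_URL)

# Faster R-CNN produces the visual features LXMERT consumes
frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
image_preprocess = Preprocess(frcnn_cfg)

URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg"
images, sizes, scales_yx = image_preprocess(URL)
output_dict = frcnn(
    images, sizes, scales_yx=scales_yx, padding="max_detections",
    max_detections=frcnn_cfg.max_detections, return_tensors="pt",
)
features = output_dict.get("roi_features")
normalized_boxes = output_dict.get("normalized_boxes")  # boxes must be normalized

# LXMERT answers a question about the image
lxmert_tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
lxmert_vqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-vqa-uncased")
inputs = lxmert_tokenizer(
    ["What is the color of the horse?"], padding="max_length", max_length=20,
    truncation=True, return_token_type_ids=True, return_attention_mask=True,
    add_special_tokens=True, return_tensors="pt",
)
output_vqa = lxmert_vqa(
    input_ids=inputs.input_ids, attention_mask=inputs.attention_mask,
    visual_feats=features, visual_pos=normalized_boxes,
    token_type_ids=inputs.token_type_ids, output_attentions=False,
)
print(vqa_answers[output_vqa["question_answering_score"].argmax(-1)])
```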
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/processing_image.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class ResizeShortestEdge: def __init__(self, short_edge_length, max_size=sys.maxsize): """ Args: short_edge_length (list[min, max]) max_size (int): maximum allowed longest edge length. """ self.interp_method = "bilinear" self.max_size = max_size self.short_edge_length = short_edge_length def __call__(self, imgs): img_augs = [] for img in imgs: h, w = img.shape[:2] # later: provide list and randomly choose index for resize size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1) if size == 0: return img scale = size * 1.0 / min(h, w) if h < w: newh, neww = size, scale * w else: newh, neww = scale * h, size if max(newh, neww) > self.max_size: scale = self.max_size * 1.0 / max(newh, neww) newh = newh * scale neww = neww * scale neww = int(neww + 0.5) newh = int(newh + 0.5) if img.dtype == np.uint8: pil_image = Image.fromarray(img) pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR) img = np.asarray(pil_image) else: img = img.permute(2, 0, 1).unsqueeze(0) # 3, 0, 1) # hw(c) -> nchw img = nn.functional.interpolate( img, (newh, neww), mode=self.interp_method, align_corners=False ).squeeze(0) img_augs.append(img) return img_augs class Preprocess: def __init__(self, cfg): self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST) self.input_format = cfg.INPUT.FORMAT self.size_divisibility = cfg.SIZE_DIVISIBILITY self.pad_value = cfg.PAD_VALUE self.max_image_size = cfg.INPUT.MAX_SIZE_TEST self.device = cfg.MODEL.DEVICE self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1) self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std def pad(self, images): max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) image_sizes = [im.shape[-2:] for im in images] images = [ nn.functional.pad( im, [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]], value=self.pad_value, ) for size, im in zip(image_sizes, images) ] return torch.stack(images), torch.tensor(image_sizes) def __call__(self, images, single_image=False): with torch.no_grad(): if not isinstance(images, list): images = [images] if single_image: assert len(images) == 1 for i in range(len(images)): if isinstance(images[i], torch.Tensor): images.insert(i, images.pop(i).to(self.device).float()) elif not isinstance(images[i], torch.Tensor): images.insert( i, torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format)) .to(self.device) .float(), ) # resize smallest edge raw_sizes = torch.tensor([im.shape[:2] for im in images]) images = 
self.aug(images) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic images = [self.normalizer(x) for x in images] # now pad them to do the following operations images, sizes = self.pad(images) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad scales_yx = torch.true_divide(raw_sizes, sizes) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def _scale_box(boxes, scale_yx): boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h)
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/demo.ipynb
# %pip install-r requirements.txtfrom IPython.display import clear_output, Image, display import PIL.Image import io import json import torch import numpy as np from processing_image import Preprocess from visualizing_image import SingleImageViz from modeling_frcnn import GeneralizedRCNN from utils import Config import utils from transformers import LxmertForQuestionAnswering, LxmertTokenizer import wget import pickle import os # URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/images/input.jpg", URL = "https://vqa.cloudcv.org/media/test2014/COCO_test2014_000000262567.jpg" OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt" ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt" GQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/gqa/trainval_label2ans.json" VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json" # for visualizing output def showarray(a, fmt="jpeg"): a = np.uint8(np.clip(a, 0, 255)) f = io.BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue()))# load object, attribute, and answer labels objids = utils.get_data(OBJ_URL) attrids = utils.get_data(ATTR_URL) gqa_answers = utils.get_data(GQA_URL) vqa_answers = utils.get_data(VQA_URL)# load models and model components frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned") frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg) image_preprocess = Preprocess(frcnn_cfg) lxmert_tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased") lxmert_gqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-gqa-uncased") lxmert_vqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-vqa-uncased")# image viz frcnn_visualizer = SingleImageViz(URL, id2obj=objids, id2attr=attrids) # run frcnn images, sizes, scales_yx = image_preprocess(URL) output_dict = frcnn( images, sizes, scales_yx=scales_yx, padding="max_detections", max_detections=frcnn_cfg.max_detections, return_tensors="pt", ) # add boxes and labels to the image frcnn_visualizer.draw_boxes( output_dict.get("boxes"), output_dict.pop("obj_ids"), output_dict.pop("obj_probs"), output_dict.pop("attr_ids"), output_dict.pop("attr_probs"), ) showarray(frcnn_visualizer._get_buffer())test_questions_for_url1 = [ "Where is this scene?", "what is the man riding?", "What is the man wearing?", "What is the color of the horse?", ] test_questions_for_url2 = [ "Where is the cat?", "What is near the disk?", "What is the color of the table?", "What is the color of the cat?", "What is the shape of the monitor?", ] # Very important that the boxes are normalized normalized_boxes = output_dict.get("normalized_boxes") features = output_dict.get("roi_features") for test_question in test_questions_for_url2: # run lxmert test_question = [test_question] inputs = lxmert_tokenizer( test_question, padding="max_length", max_length=20, truncation=True, return_token_type_ids=True, return_attention_mask=True, add_special_tokens=True, return_tensors="pt", ) # run lxmert(s) output_gqa = lxmert_gqa( input_ids=inputs.input_ids, attention_mask=inputs.attention_mask, visual_feats=features, visual_pos=normalized_boxes, token_type_ids=inputs.token_type_ids, output_attentions=False, ) output_vqa = lxmert_vqa( input_ids=inputs.input_ids, 
attention_mask=inputs.attention_mask, visual_feats=features, visual_pos=normalized_boxes, token_type_ids=inputs.token_type_ids, output_attentions=False, ) # get prediction pred_vqa = output_vqa["question_answering_score"].argmax(-1) pred_gqa = output_gqa["question_answering_score"].argmax(-1) print("Question:", test_question) print("prediction from LXMERT GQA:", gqa_answers[pred_gqa]) print("prediction from LXMERT VQA:", vqa_answers[pred_vqa])
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/modeling_frcnn.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 && Huggingface Co. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import itertools import math import os from abc import ABCMeta, abstractmethod from collections import OrderedDict, namedtuple from typing import Dict, List, Tuple import numpy as np import torch from torch import nn from torch.nn.modules.batchnorm import BatchNorm2d from torchvision.ops import RoIPool from torchvision.ops.boxes import batched_nms, nms from utils import WEIGHTS_NAME, Config, cached_path, hf_bucket_url, is_remote_url, load_checkpoint # other: def norm_box(boxes, raw_sizes): if not isinstance(boxes, torch.Tensor): normalized_boxes = boxes.copy() else: normalized_boxes = boxes.clone() normalized_boxes[:, :, (0, 2)] /= raw_sizes[:, 1] normalized_boxes[:, :, (1, 3)] /= raw_sizes[:, 0] return normalized_boxes def pad_list_tensors( list_tensors, preds_per_image, max_detections=None, return_tensors=None, padding=None, pad_value=0, location=None, ): """ location will always be cpu for np tensors """ if location is None: location = "cpu" assert return_tensors in {"pt", "np", None} assert padding in {"max_detections", "max_batch", None} new = [] if padding is None: if return_tensors is None: return list_tensors elif return_tensors == "pt": if not isinstance(list_tensors, torch.Tensor): return torch.stack(list_tensors).to(location) else: return list_tensors.to(location) else: if not isinstance(list_tensors, list): return np.array(list_tensors.to(location)) else: return list_tensors.to(location) if padding == "max_detections": assert max_detections is not None, "specify max number of detections per batch" elif padding == "max_batch": max_detections = max(preds_per_image) for i in range(len(list_tensors)): too_small = False tensor_i = list_tensors.pop(0) if tensor_i.ndim < 2: too_small = True tensor_i = tensor_i.unsqueeze(-1) assert isinstance(tensor_i, torch.Tensor) tensor_i = nn.functional.pad( input=tensor_i, pad=(0, 0, 0, max_detections - preds_per_image[i]), mode="constant", value=pad_value, ) if too_small: tensor_i = tensor_i.squeeze(-1) if return_tensors is None: if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.tolist() if return_tensors == "np": if location == "cpu": tensor_i = tensor_i.cpu() tensor_i = tensor_i.numpy() else: if location == "cpu": tensor_i = tensor_i.cpu() new.append(tensor_i) if return_tensors == "np": return np.stack(new, axis=0) elif return_tensors == "pt" and not isinstance(new, torch.Tensor): return torch.stack(new, dim=0) else: return list_tensors def do_nms(boxes, scores, image_shape, score_thresh, nms_thresh, mind, maxd): scores = scores[:, :-1] num_bbox_reg_classes = boxes.shape[1] // 4 # Convert to Boxes to use the `clip` function ... 
boxes = boxes.reshape(-1, 4) _clip_box(boxes, image_shape) boxes = boxes.view(-1, num_bbox_reg_classes, 4) # R x C x 4 # Select max scores max_scores, max_classes = scores.max(1) # R x C --> R num_objs = boxes.size(0) boxes = boxes.view(-1, 4) idxs = torch.arange(num_objs).to(boxes.device) * num_bbox_reg_classes + max_classes max_boxes = boxes[idxs] # Select max boxes according to the max scores. # Apply NMS keep = nms(max_boxes, max_scores, nms_thresh) keep = keep[:maxd] if keep.shape[-1] >= mind and keep.shape[-1] <= maxd: max_boxes, max_scores = max_boxes[keep], max_scores[keep] classes = max_classes[keep] return max_boxes, max_scores, classes, keep else: return None # Helper Functions def _clip_box(tensor, box_size: Tuple[int, int]): assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size tensor[:, 0].clamp_(min=0, max=w) tensor[:, 1].clamp_(min=0, max=h) tensor[:, 2].clamp_(min=0, max=w) tensor[:, 3].clamp_(min=0, max=h) def _nonempty_boxes(box, threshold: float = 0.0) -> torch.Tensor: widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def get_norm(norm, out_channels): if isinstance(norm, str): if len(norm) == 0: return None norm = { "BN": BatchNorm2d, "GN": lambda channels: nn.GroupNorm(32, channels), "nnSyncBN": nn.SyncBatchNorm, # keep for debugging "": lambda x: x, }[norm] return norm(out_channels) def _create_grid_offsets(size: List[int], stride: int, offset: float, device): grid_height, grid_width = size shifts_x = torch.arange( offset * stride, grid_width * stride, step=stride, dtype=torch.float32, device=device, ) shifts_y = torch.arange( offset * stride, grid_height * stride, step=stride, dtype=torch.float32, device=device, ) shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) shift_x = shift_x.reshape(-1) shift_y = shift_y.reshape(-1) return shift_x, shift_y def build_backbone(cfg): input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN)) norm = cfg.RESNETS.NORM stem = BasicStem( in_channels=input_shape.channels, out_channels=cfg.RESNETS.STEM_OUT_CHANNELS, norm=norm, caffe_maxpool=cfg.MODEL.MAX_POOL, ) freeze_at = cfg.BACKBONE.FREEZE_AT if freeze_at >= 1: for p in stem.parameters(): p.requires_grad = False out_features = cfg.RESNETS.OUT_FEATURES depth = cfg.RESNETS.DEPTH num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group in_channels = cfg.RESNETS.STEM_OUT_CHANNELS out_channels = cfg.RESNETS.RES2_OUT_CHANNELS stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 res5_dilation = cfg.RESNETS.RES5_DILATION assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation) num_blocks_per_stage = {50: [3, 4, 6, 3], 101: [3, 4, 23, 3], 152: [3, 8, 36, 3]}[depth] stages = [] out_stage_idx = [{"res2": 2, "res3": 3, "res4": 4, "res5": 5}[f] for f in out_features] max_stage_idx = max(out_stage_idx) for idx, stage_idx in enumerate(range(2, max_stage_idx + 1)): dilation = res5_dilation if stage_idx == 5 else 1 first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2 stage_kargs = { "num_blocks": num_blocks_per_stage[idx], "first_stride": first_stride, "in_channels": in_channels, "bottleneck_channels": bottleneck_channels, "out_channels": out_channels, "num_groups": num_groups, "norm": norm, "stride_in_1x1": stride_in_1x1, "dilation": dilation, } stage_kargs["block_class"] = BottleneckBlock blocks = ResNet.make_stage(**stage_kargs) in_channels = out_channels out_channels 
*= 2 bottleneck_channels *= 2 if freeze_at >= stage_idx: for block in blocks: block.freeze() stages.append(blocks) return ResNet(stem, stages, out_features=out_features) def find_top_rpn_proposals( proposals, pred_objectness_logits, images, image_sizes, nms_thresh, pre_nms_topk, post_nms_topk, min_box_side_len, training, ): """Args: proposals (list[Tensor]): (L, N, Hi*Wi*A, 4). pred_objectness_logits: tensors of length L. nms_thresh (float): IoU threshold to use for NMS pre_nms_topk (int): before nms post_nms_topk (int): after nms min_box_side_len (float): minimum proposal box side training (bool): True if proposals are to be used in training, Returns: results (List[Dict]): stores post_nms_topk object proposals for image i. """ num_images = len(images) device = proposals[0].device # 1. Select top-k anchor for every level and every image topk_scores = [] # #lvl Tensor, each of shape N x topk topk_proposals = [] level_ids = [] # #lvl Tensor, each of shape (topk,) batch_idx = torch.arange(num_images, device=device) for level_id, proposals_i, logits_i in zip(itertools.count(), proposals, pred_objectness_logits): Hi_Wi_A = logits_i.shape[1] num_proposals_i = min(pre_nms_topk, Hi_Wi_A) # sort is faster than topk (https://github.com/pytorch/pytorch/issues/22812) # topk_scores_i, topk_idx = logits_i.topk(num_proposals_i, dim=1) logits_i, idx = logits_i.sort(descending=True, dim=1) topk_scores_i = logits_i[batch_idx, :num_proposals_i] topk_idx = idx[batch_idx, :num_proposals_i] # each is N x topk topk_proposals_i = proposals_i[batch_idx[:, None], topk_idx] # N x topk x 4 topk_proposals.append(topk_proposals_i) topk_scores.append(topk_scores_i) level_ids.append(torch.full((num_proposals_i,), level_id, dtype=torch.int64, device=device)) # 2. Concat all levels together topk_scores = torch.cat(topk_scores, dim=1) topk_proposals = torch.cat(topk_proposals, dim=1) level_ids = torch.cat(level_ids, dim=0) # if I change to batched_nms, I wonder if this will make a difference # 3. For each image, run a per-level NMS, and choose topk results. results = [] for n, image_size in enumerate(image_sizes): boxes = topk_proposals[n] scores_per_img = topk_scores[n] # I will have to take a look at the boxes clip method _clip_box(boxes, image_size) # filter empty boxes keep = _nonempty_boxes(boxes, threshold=min_box_side_len) lvl = level_ids if keep.sum().item() != len(boxes): boxes, scores_per_img, lvl = ( boxes[keep], scores_per_img[keep], level_ids[keep], ) keep = batched_nms(boxes, scores_per_img, lvl, nms_thresh) keep = keep[:post_nms_topk] res = (boxes[keep], scores_per_img[keep]) results.append(res) # I wonder if it would be possible for me to pad all these things. return results def subsample_labels(labels, num_samples, positive_fraction, bg_label): """ Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. 
""" positive = torch.nonzero((labels != -1) & (labels != bg_label)).squeeze(1) negative = torch.nonzero(labels == bg_label).squeeze(1) num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def add_ground_truth_to_proposals(gt_boxes, proposals): raise NotImplementedError() def add_ground_truth_to_proposals_single_image(gt_boxes, proposals): raise NotImplementedError() def _fmt_box_list(box_tensor, batch_index: int): repeated_index = torch.full( (len(box_tensor), 1), batch_index, dtype=box_tensor.dtype, device=box_tensor.device, ) return torch.cat((repeated_index, box_tensor), dim=1) def convert_boxes_to_pooler_format(box_lists: List[torch.Tensor]): pooler_fmt_boxes = torch.cat( [_fmt_box_list(box_list, i) for i, box_list in enumerate(box_lists)], dim=0, ) return pooler_fmt_boxes def assign_boxes_to_levels( box_lists: List[torch.Tensor], min_level: int, max_level: int, canonical_box_size: int, canonical_level: int, ): box_sizes = torch.sqrt(torch.cat([boxes.area() for boxes in box_lists])) # Eqn.(1) in FPN paper level_assignments = torch.floor(canonical_level + torch.log2(box_sizes / canonical_box_size + 1e-8)) # clamp level to (min, max), in case the box size is too large or too small # for the available feature maps level_assignments = torch.clamp(level_assignments, min=min_level, max=max_level) return level_assignments.to(torch.int64) - min_level # Helper Classes class _NewEmptyTensorOp(torch.autograd.Function): @staticmethod def forward(ctx, x, new_shape): ctx.shape = x.shape return x.new_empty(new_shape) @staticmethod def backward(ctx, grad): shape = ctx.shape return _NewEmptyTensorOp.apply(grad, shape), None class ShapeSpec(namedtuple("_ShapeSpec", ["channels", "height", "width", "stride"])): def __new__(cls, *, channels=None, height=None, width=None, stride=None): return super().__new__(cls, channels, height, width, stride) class Box2BoxTransform(object): """ This R-CNN transformation scales the box's width and height by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height). """ def __init__(self, weights: Tuple[float, float, float, float], scale_clamp: float = None): """ Args: weights (4-element tuple): Scaling factors that are applied to the (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set such that the deltas have unit variance; now they are treated as hyperparameters of the system. scale_clamp (float): When predicting deltas, the predicted box scaling factors (dw and dh) are clamped such that they are <= scale_clamp. """ self.weights = weights if scale_clamp is not None: self.scale_clamp = scale_clamp else: """ Value for clamping large dw and dh predictions. The heuristic is that we clamp such that dw and dh are no larger than what would transform a 16px box into a 1000px box (based on a small anchor, 16px, and a typical image size, 1000px). """ self.scale_clamp = math.log(1000.0 / 16) def get_deltas(self, src_boxes, target_boxes): """ Get box regression transformation deltas (dx, dy, dw, dh) that can be used to transform the `src_boxes` into the `target_boxes`. 
That is, the relation ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless any delta is too large and is clamped). Args: src_boxes (Tensor): source boxes, e.g., object proposals target_boxes (Tensor): target of the transformation, e.g., ground-truth boxes. """ assert isinstance(src_boxes, torch.Tensor), type(src_boxes) assert isinstance(target_boxes, torch.Tensor), type(target_boxes) src_widths = src_boxes[:, 2] - src_boxes[:, 0] src_heights = src_boxes[:, 3] - src_boxes[:, 1] src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights target_widths = target_boxes[:, 2] - target_boxes[:, 0] target_heights = target_boxes[:, 3] - target_boxes[:, 1] target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights wx, wy, ww, wh = self.weights dx = wx * (target_ctr_x - src_ctr_x) / src_widths dy = wy * (target_ctr_y - src_ctr_y) / src_heights dw = ww * torch.log(target_widths / src_widths) dh = wh * torch.log(target_heights / src_heights) deltas = torch.stack((dx, dy, dw, dh), dim=1) assert (src_widths > 0).all().item(), "Input boxes to Box2BoxTransform are not valid!" return deltas def apply_deltas(self, deltas, boxes): """ Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`. Args: deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1. deltas[i] represents k potentially different class-specific box transformations for the single box boxes[i]. boxes (Tensor): boxes to transform, of shape (N, 4) """ boxes = boxes.to(deltas.dtype) widths = boxes[:, 2] - boxes[:, 0] heights = boxes[:, 3] - boxes[:, 1] ctr_x = boxes[:, 0] + 0.5 * widths ctr_y = boxes[:, 1] + 0.5 * heights wx, wy, ww, wh = self.weights dx = deltas[:, 0::4] / wx dy = deltas[:, 1::4] / wy dw = deltas[:, 2::4] / ww dh = deltas[:, 3::4] / wh # Prevent sending too large values into torch.exp() dw = torch.clamp(dw, max=self.scale_clamp) dh = torch.clamp(dh, max=self.scale_clamp) pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] pred_w = torch.exp(dw) * widths[:, None] pred_h = torch.exp(dh) * heights[:, None] pred_boxes = torch.zeros_like(deltas) pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w # x1 pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h # y1 pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w # x2 pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h # y2 return pred_boxes class Matcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__( self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False, ): """ Args: thresholds (list): a list of thresholds used to stratify predictions into levels. labels (list): a list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. 
allow_low_quality_matches (bool): if True, produce additional matches or predictions with maximum match quality lower than high_threshold. For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ thresholds = thresholds[:] assert thresholds[0] > 0 thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) assert all(low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])) assert all(label_i in [-1, 0, 1] for label_i in labels) assert len(labels) == len(thresholds) - 1 self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in :meth:`set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, # can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of Faster R-CNN. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. # Note that the matches qualities must be positive due to the use of # `torch.nonzero`. 
of_quality_inds = match_quality_matrix == highest_quality_foreach_gt[:, None] if of_quality_inds.dim() == 0: (_, pred_inds_with_highest_quality) = of_quality_inds.unsqueeze(0).nonzero().unbind(1) else: (_, pred_inds_with_highest_quality) = of_quality_inds.nonzero().unbind(1) match_labels[pred_inds_with_highest_quality] = 1 class RPNOutputs(object): def __init__( self, box2box_transform, anchor_matcher, batch_size_per_image, positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, boundary_threshold=0, gt_boxes=None, smooth_l1_beta=0.0, ): """ Args: box2box_transform (Box2BoxTransform): :class:`Box2BoxTransform` instance for anchor-proposal transformations. anchor_matcher (Matcher): :class:`Matcher` instance for matching anchors to ground-truth boxes; used to determine training labels. batch_size_per_image (int): number of proposals to sample when training positive_fraction (float): target fraction of sampled proposals that should be positive images (ImageList): :class:`ImageList` instance representing N input images pred_objectness_logits (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A, Hi, W) pred_anchor_deltas (list[Tensor]): A list of L elements. Element i is a tensor of shape (N, A*4, Hi, Wi) anchors (list[torch.Tensor]): nested list of boxes. anchors[i][j] at (n, l) stores anchor array for feature map l boundary_threshold (int): if >= 0, then anchors that extend beyond the image boundary by more than boundary_thresh are not used in training. gt_boxes (list[Boxes], optional): A list of N elements. smooth_l1_beta (float): The transition point between L1 and L2 lossn. When set to 0, the loss becomes L1. When +inf, it is ignored """ self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher self.batch_size_per_image = batch_size_per_image self.positive_fraction = positive_fraction self.pred_objectness_logits = pred_objectness_logits self.pred_anchor_deltas = pred_anchor_deltas self.anchors = anchors self.gt_boxes = gt_boxes self.num_feature_maps = len(pred_objectness_logits) self.num_images = len(images) self.boundary_threshold = boundary_threshold self.smooth_l1_beta = smooth_l1_beta def _get_ground_truth(self): raise NotImplementedError() def predict_proposals(self): # pred_anchor_deltas: (L, N, ? Hi, Wi) # anchors:(N, L, -1, B) # here we loop over specific feature map, NOT images proposals = [] anchors = self.anchors.transpose(0, 1) for anchors_i, pred_anchor_deltas_i in zip(anchors, self.pred_anchor_deltas): B = anchors_i.size(-1) N, _, Hi, Wi = pred_anchor_deltas_i.shape anchors_i = anchors_i.flatten(start_dim=0, end_dim=1) pred_anchor_deltas_i = pred_anchor_deltas_i.view(N, -1, B, Hi, Wi).permute(0, 3, 4, 1, 2).reshape(-1, B) proposals_i = self.box2box_transform.apply_deltas(pred_anchor_deltas_i, anchors_i) # Append feature map proposals with shape (N, Hi*Wi*A, B) proposals.append(proposals_i.view(N, -1, B)) proposals = torch.stack(proposals) return proposals def predict_objectness_logits(self): """ Returns: pred_objectness_logits (list[Tensor]) -> (N, Hi*Wi*A). 
""" pred_objectness_logits = [ # Reshape: (N, A, Hi, Wi) -> (N, Hi, Wi, A) -> (N, Hi*Wi*A) score.permute(0, 2, 3, 1).reshape(self.num_images, -1) for score in self.pred_objectness_logits ] return pred_objectness_logits # Main Classes class Conv2d(nn.Conv2d): def __init__(self, *args, **kwargs): norm = kwargs.pop("norm", None) activation = kwargs.pop("activation", None) super().__init__(*args, **kwargs) self.norm = norm self.activation = activation def forward(self, x): if x.numel() == 0 and self.training: assert not isinstance(self.norm, nn.SyncBatchNorm) if x.numel() == 0: assert not isinstance(self.norm, nn.GroupNorm) output_shape = [ (i + 2 * p - (di * (k - 1) + 1)) // s + 1 for i, p, di, k, s in zip( x.shape[-2:], self.padding, self.dilation, self.kernel_size, self.stride, ) ] output_shape = [x.shape[0], self.weight.shape[0]] + output_shape empty = _NewEmptyTensorOp.apply(x, output_shape) if self.training: _dummy = sum(x.view(-1)[0] for x in self.parameters()) * 0.0 return empty + _dummy else: return empty x = super().forward(x) if self.norm is not None: x = self.norm(x) if self.activation is not None: x = self.activation(x) return x class LastLevelMaxPool(nn.Module): """ This module is used in the original FPN to generate a downsampled P6 feature from P5. """ def __init__(self): super().__init__() self.num_levels = 1 self.in_feature = "p5" def forward(self, x): return [nn.functional.max_pool2d(x, kernel_size=1, stride=2, padding=0)] class LastLevelP6P7(nn.Module): """ This module is used in RetinaNet to generate extra layers, P6 and P7 from C5 feature. """ def __init__(self, in_channels, out_channels): super().__init__() self.num_levels = 2 self.in_feature = "res5" self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) def forward(self, c5): p6 = self.p6(c5) p7 = self.p7(nn.functional.relu(p6)) return [p6, p7] class BasicStem(nn.Module): def __init__(self, in_channels=3, out_channels=64, norm="BN", caffe_maxpool=False): super().__init__() self.conv1 = Conv2d( in_channels, out_channels, kernel_size=7, stride=2, padding=3, bias=False, norm=get_norm(norm, out_channels), ) self.caffe_maxpool = caffe_maxpool # use pad 1 instead of pad zero def forward(self, x): x = self.conv1(x) x = nn.functional.relu_(x) if self.caffe_maxpool: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=0, ceil_mode=True) else: x = nn.functional.max_pool2d(x, kernel_size=3, stride=2, padding=1) return x @property def out_channels(self): return self.conv1.out_channels @property def stride(self): return 4 # = stride 2 conv -> stride 2 max pool class ResNetBlockBase(nn.Module): def __init__(self, in_channels, out_channels, stride): super().__init__() self.in_channels = in_channels self.out_channels = out_channels self.stride = stride def freeze(self): for p in self.parameters(): p.requires_grad = False return self class BottleneckBlock(ResNetBlockBase): def __init__( self, in_channels, out_channels, bottleneck_channels, stride=1, num_groups=1, norm="BN", stride_in_1x1=False, dilation=1, ): super().__init__(in_channels, out_channels, stride) if in_channels != out_channels: self.shortcut = Conv2d( in_channels, out_channels, kernel_size=1, stride=stride, bias=False, norm=get_norm(norm, out_channels), ) else: self.shortcut = None # The original MSRA ResNet models have stride in the first 1x1 conv # The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have # stride in the 3x3 conv stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 
else (1, stride) self.conv1 = Conv2d( in_channels, bottleneck_channels, kernel_size=1, stride=stride_1x1, bias=False, norm=get_norm(norm, bottleneck_channels), ) self.conv2 = Conv2d( bottleneck_channels, bottleneck_channels, kernel_size=3, stride=stride_3x3, padding=1 * dilation, bias=False, groups=num_groups, dilation=dilation, norm=get_norm(norm, bottleneck_channels), ) self.conv3 = Conv2d( bottleneck_channels, out_channels, kernel_size=1, bias=False, norm=get_norm(norm, out_channels), ) def forward(self, x): out = self.conv1(x) out = nn.functional.relu_(out) out = self.conv2(out) out = nn.functional.relu_(out) out = self.conv3(out) if self.shortcut is not None: shortcut = self.shortcut(x) else: shortcut = x out += shortcut out = nn.functional.relu_(out) return out class Backbone(nn.Module, metaclass=ABCMeta): def __init__(self): super().__init__() @abstractmethod def forward(self): pass @property def size_divisibility(self): """ Some backbones require the input height and width to be divisible by a specific integer. This is typically true for encoder / decoder type networks with lateral connection (e.g., FPN) for which feature maps need to match dimension in the "bottom up" and "top down" paths. Set to 0 if no specific input size divisibility is required. """ return 0 def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name], ) for name in self._out_features } @property def out_features(self): """deprecated""" return self._out_features @property def out_feature_strides(self): """deprecated""" return {f: self._out_feature_strides[f] for f in self._out_features} @property def out_feature_channels(self): """deprecated""" return {f: self._out_feature_channels[f] for f in self._out_features} class ResNet(Backbone): def __init__(self, stem, stages, num_classes=None, out_features=None): """ Args: stem (nn.Module): a stem module stages (list[list[ResNetBlock]]): several (typically 4) stages, each contains multiple :class:`ResNetBlockBase`. num_classes (None or int): if None, will not perform classification. out_features (list[str]): name of the layers whose outputs should be returned in forward. Can be anything in: "stem", "linear", or "res2" ... If None, will return the output of the last layer. """ super(ResNet, self).__init__() self.stem = stem self.num_classes = num_classes current_stride = self.stem.stride self._out_feature_strides = {"stem": current_stride} self._out_feature_channels = {"stem": self.stem.out_channels} self.stages_and_names = [] for i, blocks in enumerate(stages): for block in blocks: assert isinstance(block, ResNetBlockBase), block curr_channels = block.out_channels stage = nn.Sequential(*blocks) name = "res" + str(i + 2) self.add_module(name, stage) self.stages_and_names.append((stage, name)) self._out_feature_strides[name] = current_stride = int( current_stride * np.prod([k.stride for k in blocks]) ) self._out_feature_channels[name] = blocks[-1].out_channels if num_classes is not None: self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) self.linear = nn.Linear(curr_channels, num_classes) # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour": # "The 1000-way fully-connected layer is initialized by # drawing weights from a zero-mean Gaussian with std of 0.01." 
nn.init.normal_(self.linear.weight, stddev=0.01) name = "linear" if out_features is None: out_features = [name] self._out_features = out_features assert len(self._out_features) children = [x[0] for x in self.named_children()] for out_feature in self._out_features: assert out_feature in children, "Available children: {}".format(", ".join(children)) def forward(self, x): outputs = {} x = self.stem(x) if "stem" in self._out_features: outputs["stem"] = x for stage, name in self.stages_and_names: x = stage(x) if name in self._out_features: outputs[name] = x if self.num_classes is not None: x = self.avgpool(x) x = self.linear(x) if "linear" in self._out_features: outputs["linear"] = x return outputs def output_shape(self): return { name: ShapeSpec( channels=self._out_feature_channels[name], stride=self._out_feature_strides[name], ) for name in self._out_features } @staticmethod def make_stage( block_class, num_blocks, first_stride=None, *, in_channels, out_channels, **kwargs, ): """ Usually, layers that produce the same feature map spatial size are defined as one "stage". Under such definition, stride_per_block[1:] should all be 1. """ if first_stride is not None: assert "stride" not in kwargs and "stride_per_block" not in kwargs kwargs["stride_per_block"] = [first_stride] + [1] * (num_blocks - 1) blocks = [] for i in range(num_blocks): curr_kwargs = {} for k, v in kwargs.items(): if k.endswith("_per_block"): assert ( len(v) == num_blocks ), f"Argument '{k}' of make_stage should have the same length as num_blocks={num_blocks}." newk = k[: -len("_per_block")] assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!" curr_kwargs[newk] = v[i] else: curr_kwargs[k] = v blocks.append(block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)) in_channels = out_channels return blocks class ROIPooler(nn.Module): """ Region of interest feature map pooler that supports pooling from one or more feature maps. """ def __init__( self, output_size, scales, sampling_ratio, canonical_box_size=224, canonical_level=4, ): super().__init__() # assumption that stride is a power of 2. 
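        # For example, an FPN with scales = (1/4, 1/8, 1/16, 1/32) gives min_level = 2 and max_level = 5.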
min_level = -math.log2(scales[0]) max_level = -math.log2(scales[-1]) # a bunch of testing assert math.isclose(min_level, int(min_level)) and math.isclose(max_level, int(max_level)) assert len(scales) == max_level - min_level + 1, "not pyramid" assert 0 < min_level and min_level <= max_level if isinstance(output_size, int): output_size = (output_size, output_size) assert len(output_size) == 2 and isinstance(output_size[0], int) and isinstance(output_size[1], int) if len(scales) > 1: assert min_level <= canonical_level and canonical_level <= max_level assert canonical_box_size > 0 self.output_size = output_size self.min_level = int(min_level) self.max_level = int(max_level) self.level_poolers = nn.ModuleList(RoIPool(output_size, spatial_scale=scale) for scale in scales) self.canonical_level = canonical_level self.canonical_box_size = canonical_box_size def forward(self, feature_maps, boxes): """ Args: feature_maps: List[torch.Tensor(N,C,W,H)] box_lists: list[torch.Tensor]) Returns: A tensor of shape(N*B, Channels, output_size, output_size) """ x = list(feature_maps.values()) num_level_assignments = len(self.level_poolers) assert len(x) == num_level_assignments and len(boxes) == x[0].size(0) pooler_fmt_boxes = convert_boxes_to_pooler_format(boxes) if num_level_assignments == 1: return self.level_poolers[0](x[0], pooler_fmt_boxes) level_assignments = assign_boxes_to_levels( boxes, self.min_level, self.max_level, self.canonical_box_size, self.canonical_level, ) num_boxes = len(pooler_fmt_boxes) num_channels = x[0].shape[1] output_size = self.output_size[0] dtype, device = x[0].dtype, x[0].device output = torch.zeros( (num_boxes, num_channels, output_size, output_size), dtype=dtype, device=device, ) for level, (x_level, pooler) in enumerate(zip(x, self.level_poolers)): inds = torch.nonzero(level_assignments == level).squeeze(1) pooler_fmt_boxes_level = pooler_fmt_boxes[inds] output[inds] = pooler(x_level, pooler_fmt_boxes_level) return output class ROIOutputs(object): def __init__(self, cfg, training=False): self.smooth_l1_beta = cfg.ROI_BOX_HEAD.SMOOTH_L1_BETA self.box2box_transform = Box2BoxTransform(weights=cfg.ROI_BOX_HEAD.BBOX_REG_WEIGHTS) self.training = training self.score_thresh = cfg.ROI_HEADS.SCORE_THRESH_TEST self.min_detections = cfg.MIN_DETECTIONS self.max_detections = cfg.MAX_DETECTIONS nms_thresh = cfg.ROI_HEADS.NMS_THRESH_TEST if not isinstance(nms_thresh, list): nms_thresh = [nms_thresh] self.nms_thresh = nms_thresh def _predict_boxes(self, proposals, box_deltas, preds_per_image): num_pred = box_deltas.size(0) B = proposals[0].size(-1) K = box_deltas.size(-1) // B box_deltas = box_deltas.view(num_pred * K, B) proposals = torch.cat(proposals, dim=0).unsqueeze(-2).expand(num_pred, K, B) proposals = proposals.reshape(-1, B) boxes = self.box2box_transform.apply_deltas(box_deltas, proposals) return boxes.view(num_pred, K * B).split(preds_per_image, dim=0) def _predict_objs(self, obj_logits, preds_per_image): probs = nn.functional.softmax(obj_logits, dim=-1) probs = probs.split(preds_per_image, dim=0) return probs def _predict_attrs(self, attr_logits, preds_per_image): attr_logits = attr_logits[..., :-1].softmax(-1) attr_probs, attrs = attr_logits.max(-1) return attr_probs.split(preds_per_image, dim=0), attrs.split(preds_per_image, dim=0) @torch.no_grad() def inference( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): # only the pred boxes is the preds_per_image = [p.size(0) for p in pred_boxes] boxes_all = self._predict_boxes(pred_boxes, 
box_deltas, preds_per_image) obj_scores_all = self._predict_objs(obj_logits, preds_per_image) # list of length N attr_probs_all, attrs_all = self._predict_attrs(attr_logits, preds_per_image) features = features.split(preds_per_image, dim=0) # fun for each image too, also I can experiment and do multiple images final_results = [] zipped = zip(boxes_all, obj_scores_all, attr_probs_all, attrs_all, sizes) for i, (boxes, obj_scores, attr_probs, attrs, size) in enumerate(zipped): for nms_t in self.nms_thresh: outputs = do_nms( boxes, obj_scores, size, self.score_thresh, nms_t, self.min_detections, self.max_detections, ) if outputs is not None: max_boxes, max_scores, classes, ids = outputs break if scales is not None: scale_yx = scales[i] max_boxes[:, 0::2] *= scale_yx[1] max_boxes[:, 1::2] *= scale_yx[0] final_results.append( ( max_boxes, classes, max_scores, attrs[ids], attr_probs[ids], features[i][ids], ) ) boxes, classes, class_probs, attrs, attr_probs, roi_features = map(list, zip(*final_results)) return boxes, classes, class_probs, attrs, attr_probs, roi_features def training(self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes): pass def __call__( self, obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=None, ): if self.training: raise NotImplementedError() return self.inference( obj_logits, attr_logits, box_deltas, pred_boxes, features, sizes, scales=scales, ) class Res5ROIHeads(nn.Module): """ ROIHeads perform all per-region computation in an R-CNN. It contains logic of cropping the regions, extract per-region features (by the res-5 block in this case), and make per-region predictions. """ def __init__(self, cfg, input_shape): super().__init__() self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_sample_fraction = cfg.ROI_HEADS.POSITIVE_FRACTION self.in_features = cfg.ROI_HEADS.IN_FEATURES self.num_classes = cfg.ROI_HEADS.NUM_CLASSES self.proposal_append_gt = cfg.ROI_HEADS.PROPOSAL_APPEND_GT self.feature_strides = {k: v.stride for k, v in input_shape.items()} self.feature_channels = {k: v.channels for k, v in input_shape.items()} self.cls_agnostic_bbox_reg = cfg.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG self.stage_channel_factor = 2**3 # res5 is 8x res2 self.out_channels = cfg.RESNETS.RES2_OUT_CHANNELS * self.stage_channel_factor # self.proposal_matcher = Matcher( # cfg.ROI_HEADS.IOU_THRESHOLDS, # cfg.ROI_HEADS.IOU_LABELS, # allow_low_quality_matches=False, # ) pooler_resolution = cfg.ROI_BOX_HEAD.POOLER_RESOLUTION pooler_scales = (1.0 / self.feature_strides[self.in_features[0]],) sampling_ratio = cfg.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO res5_halve = cfg.ROI_BOX_HEAD.RES5HALVE use_attr = cfg.ROI_BOX_HEAD.ATTR num_attrs = cfg.ROI_BOX_HEAD.NUM_ATTRS self.pooler = ROIPooler( output_size=pooler_resolution, scales=pooler_scales, sampling_ratio=sampling_ratio, ) self.res5 = self._build_res5_block(cfg) if not res5_halve: """ Modifications for VG in RoI heads: 1. Change the stride of conv1 and shortcut in Res5.Block1 from 2 to 1 2. 
Modifying all conv2 with (padding: 1 --> 2) and (dilation: 1 --> 2) """ self.res5[0].conv1.stride = (1, 1) self.res5[0].shortcut.stride = (1, 1) for i in range(3): self.res5[i].conv2.padding = (2, 2) self.res5[i].conv2.dilation = (2, 2) self.box_predictor = FastRCNNOutputLayers( self.out_channels, self.num_classes, self.cls_agnostic_bbox_reg, use_attr=use_attr, num_attrs=num_attrs, ) def _build_res5_block(self, cfg): stage_channel_factor = self.stage_channel_factor # res5 is 8x res2 num_groups = cfg.RESNETS.NUM_GROUPS width_per_group = cfg.RESNETS.WIDTH_PER_GROUP bottleneck_channels = num_groups * width_per_group * stage_channel_factor out_channels = self.out_channels stride_in_1x1 = cfg.RESNETS.STRIDE_IN_1X1 norm = cfg.RESNETS.NORM blocks = ResNet.make_stage( BottleneckBlock, 3, first_stride=2, in_channels=out_channels // 2, bottleneck_channels=bottleneck_channels, out_channels=out_channels, num_groups=num_groups, norm=norm, stride_in_1x1=stride_in_1x1, ) return nn.Sequential(*blocks) def _shared_roi_transform(self, features, boxes): x = self.pooler(features, boxes) return self.res5(x) def forward(self, features, proposal_boxes, gt_boxes=None): if self.training: """ see https://github.com/airsplay/py-bottom-up-attention/\ blob/master/detectron2/modeling/roi_heads/roi_heads.py """ raise NotImplementedError() assert not proposal_boxes[0].requires_grad box_features = self._shared_roi_transform(features, proposal_boxes) feature_pooled = box_features.mean(dim=[2, 3]) # pooled to 1x1 obj_logits, attr_logits, pred_proposal_deltas = self.box_predictor(feature_pooled) return obj_logits, attr_logits, pred_proposal_deltas, feature_pooled class AnchorGenerator(nn.Module): """ For a set of image sizes and feature maps, computes a set of anchors. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() sizes = cfg.ANCHOR_GENERATOR.SIZES aspect_ratios = cfg.ANCHOR_GENERATOR.ASPECT_RATIOS self.strides = [x.stride for x in input_shape] self.offset = cfg.ANCHOR_GENERATOR.OFFSET assert 0.0 <= self.offset < 1.0, self.offset """ sizes (list[list[int]]): sizes[i] is the list of anchor sizes for feat map i 1. given in absolute lengths in units of the input image; 2. they do not dynamically scale if the input image size changes. aspect_ratios (list[list[float]]) strides (list[int]): stride of each input feature. """ self.num_features = len(self.strides) self.cell_anchors = nn.ParameterList(self._calculate_anchors(sizes, aspect_ratios)) self._spacial_feat_dim = 4 def _calculate_anchors(self, sizes, aspect_ratios): # If one size (or aspect ratio) is specified and there are multiple feature # maps, then we "broadcast" anchors of that single size (or aspect ratio) if len(sizes) == 1: sizes *= self.num_features if len(aspect_ratios) == 1: aspect_ratios *= self.num_features assert self.num_features == len(sizes) assert self.num_features == len(aspect_ratios) cell_anchors = [self.generate_cell_anchors(s, a).float() for s, a in zip(sizes, aspect_ratios)] return cell_anchors @property def box_dim(self): return self._spacial_feat_dim @property def num_cell_anchors(self): """ Returns: list[int]: Each int is the number of anchors at every pixel location, on that feature map. 
""" return [len(cell_anchors) for cell_anchors in self.cell_anchors] def grid_anchors(self, grid_sizes): anchors = [] for size, stride, base_anchors in zip(grid_sizes, self.strides, self.cell_anchors): shift_x, shift_y = _create_grid_offsets(size, stride, self.offset, base_anchors.device) shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4)) return anchors def generate_cell_anchors(self, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)): """ anchors are continuous geometric rectangles centered on one feature map point sample. We can later build the set of anchors for the entire feature map by tiling these tensors """ anchors = [] for size in sizes: area = size**2.0 for aspect_ratio in aspect_ratios: w = math.sqrt(area / aspect_ratio) h = aspect_ratio * w x0, y0, x1, y1 = -w / 2.0, -h / 2.0, w / 2.0, h / 2.0 anchors.append([x0, y0, x1, y1]) return nn.Parameter(torch.tensor(anchors)) def forward(self, features): """ Args: features List[torch.Tensor]: list of feature maps on which to generate anchors. Returns: torch.Tensor: a list of #image elements. """ num_images = features[0].size(0) grid_sizes = [feature_map.shape[-2:] for feature_map in features] anchors_over_all_feature_maps = self.grid_anchors(grid_sizes) anchors_over_all_feature_maps = torch.stack(anchors_over_all_feature_maps) return anchors_over_all_feature_maps.unsqueeze(0).repeat_interleave(num_images, dim=0) class RPNHead(nn.Module): """ RPN classification and regression heads. Uses a 3x3 conv to produce a shared hidden state from which one 1x1 conv predicts objectness logits for each anchor and a second 1x1 conv predicts bounding-box deltas specifying how to deform each anchor into an object proposal. """ def __init__(self, cfg, input_shape: List[ShapeSpec]): super().__init__() # Standard RPN is shared across levels: in_channels = [s.channels for s in input_shape] assert len(set(in_channels)) == 1, "Each level must have the same channel!" 
in_channels = in_channels[0] anchor_generator = AnchorGenerator(cfg, input_shape) num_cell_anchors = anchor_generator.num_cell_anchors box_dim = anchor_generator.box_dim assert len(set(num_cell_anchors)) == 1, "Each level must have the same number of cell anchors" num_cell_anchors = num_cell_anchors[0] if cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS == -1: hid_channels = in_channels else: hid_channels = cfg.PROPOSAL_GENERATOR.HIDDEN_CHANNELS # Modifications for VG in RPN (modeling/proposal_generator/rpn.py) # Use hidden dim instead fo the same dim as Res4 (in_channels) # 3x3 conv for the hidden representation self.conv = nn.Conv2d(in_channels, hid_channels, kernel_size=3, stride=1, padding=1) # 1x1 conv for predicting objectness logits self.objectness_logits = nn.Conv2d(hid_channels, num_cell_anchors, kernel_size=1, stride=1) # 1x1 conv for predicting box2box transform deltas self.anchor_deltas = nn.Conv2d(hid_channels, num_cell_anchors * box_dim, kernel_size=1, stride=1) for layer in [self.conv, self.objectness_logits, self.anchor_deltas]: nn.init.normal_(layer.weight, std=0.01) nn.init.constant_(layer.bias, 0) def forward(self, features): """ Args: features (list[Tensor]): list of feature maps """ pred_objectness_logits = [] pred_anchor_deltas = [] for x in features: t = nn.functional.relu(self.conv(x)) pred_objectness_logits.append(self.objectness_logits(t)) pred_anchor_deltas.append(self.anchor_deltas(t)) return pred_objectness_logits, pred_anchor_deltas class RPN(nn.Module): """ Region Proposal Network, introduced by the Faster R-CNN paper. """ def __init__(self, cfg, input_shape: Dict[str, ShapeSpec]): super().__init__() self.min_box_side_len = cfg.PROPOSAL_GENERATOR.MIN_SIZE self.in_features = cfg.RPN.IN_FEATURES self.nms_thresh = cfg.RPN.NMS_THRESH self.batch_size_per_image = cfg.RPN.BATCH_SIZE_PER_IMAGE self.positive_fraction = cfg.RPN.POSITIVE_FRACTION self.smooth_l1_beta = cfg.RPN.SMOOTH_L1_BETA self.loss_weight = cfg.RPN.LOSS_WEIGHT self.pre_nms_topk = { True: cfg.RPN.PRE_NMS_TOPK_TRAIN, False: cfg.RPN.PRE_NMS_TOPK_TEST, } self.post_nms_topk = { True: cfg.RPN.POST_NMS_TOPK_TRAIN, False: cfg.RPN.POST_NMS_TOPK_TEST, } self.boundary_threshold = cfg.RPN.BOUNDARY_THRESH self.anchor_generator = AnchorGenerator(cfg, [input_shape[f] for f in self.in_features]) self.box2box_transform = Box2BoxTransform(weights=cfg.RPN.BBOX_REG_WEIGHTS) self.anchor_matcher = Matcher( cfg.RPN.IOU_THRESHOLDS, cfg.RPN.IOU_LABELS, allow_low_quality_matches=True, ) self.rpn_head = RPNHead(cfg, [input_shape[f] for f in self.in_features]) def training(self, images, image_shapes, features, gt_boxes): pass def inference(self, outputs, images, image_shapes, features, gt_boxes=None): outputs = find_top_rpn_proposals( outputs.predict_proposals(), outputs.predict_objectness_logits(), images, image_shapes, self.nms_thresh, self.pre_nms_topk[self.training], self.post_nms_topk[self.training], self.min_box_side_len, self.training, ) results = [] for img in outputs: im_boxes, img_box_logits = img img_box_logits, inds = img_box_logits.sort(descending=True) im_boxes = im_boxes[inds] results.append((im_boxes, img_box_logits)) (proposal_boxes, logits) = tuple(map(list, zip(*results))) return proposal_boxes, logits def forward(self, images, image_shapes, features, gt_boxes=None): """ Args: images (torch.Tensor): input images of length `N` features (dict[str: Tensor]) gt_instances """ # features is dict, key = block level, v = feature_map features = [features[f] for f in self.in_features] pred_objectness_logits, pred_anchor_deltas = 
self.rpn_head(features) anchors = self.anchor_generator(features) outputs = RPNOutputs( self.box2box_transform, self.anchor_matcher, self.batch_size_per_image, self.positive_fraction, images, pred_objectness_logits, pred_anchor_deltas, anchors, self.boundary_threshold, gt_boxes, self.smooth_l1_beta, ) # For RPN-only models, the proposals are the final output if self.training: raise NotImplementedError() return self.training(outputs, images, image_shapes, features, gt_boxes) else: return self.inference(outputs, images, image_shapes, features, gt_boxes) class FastRCNNOutputLayers(nn.Module): """ Two linear layers for predicting Fast R-CNN outputs: (1) proposal-to-detection box regression deltas (2) classification scores """ def __init__( self, input_size, num_classes, cls_agnostic_bbox_reg, box_dim=4, use_attr=False, num_attrs=-1, ): """ Args: input_size (int): channels, or (channels, height, width) num_classes (int) cls_agnostic_bbox_reg (bool) box_dim (int) """ super().__init__() if not isinstance(input_size, int): input_size = np.prod(input_size) # (do + 1 for background class) self.cls_score = nn.Linear(input_size, num_classes + 1) num_bbox_reg_classes = 1 if cls_agnostic_bbox_reg else num_classes self.bbox_pred = nn.Linear(input_size, num_bbox_reg_classes * box_dim) self.use_attr = use_attr if use_attr: """ Modifications for VG in RoI heads Embedding: {num_classes + 1} --> {input_size // 8} Linear: {input_size + input_size // 8} --> {input_size // 4} Linear: {input_size // 4} --> {num_attrs + 1} """ self.cls_embedding = nn.Embedding(num_classes + 1, input_size // 8) self.fc_attr = nn.Linear(input_size + input_size // 8, input_size // 4) self.attr_score = nn.Linear(input_size // 4, num_attrs + 1) nn.init.normal_(self.cls_score.weight, std=0.01) nn.init.normal_(self.bbox_pred.weight, std=0.001) for item in [self.cls_score, self.bbox_pred]: nn.init.constant_(item.bias, 0) def forward(self, roi_features): if roi_features.dim() > 2: roi_features = torch.flatten(roi_features, start_dim=1) scores = self.cls_score(roi_features) proposal_deltas = self.bbox_pred(roi_features) if self.use_attr: _, max_class = scores.max(-1) # [b, c] --> [b] cls_emb = self.cls_embedding(max_class) # [b] --> [b, 256] roi_features = torch.cat([roi_features, cls_emb], -1) # [b, 2048] + [b, 256] --> [b, 2304] roi_features = self.fc_attr(roi_features) roi_features = nn.functional.relu(roi_features) attr_scores = self.attr_score(roi_features) return scores, attr_scores, proposal_deltas else: return scores, proposal_deltas class GeneralizedRCNN(nn.Module): def __init__(self, cfg): super().__init__() self.device = torch.device(cfg.MODEL.DEVICE) self.backbone = build_backbone(cfg) self.proposal_generator = RPN(cfg, self.backbone.output_shape()) self.roi_heads = Res5ROIHeads(cfg, self.backbone.output_shape()) self.roi_outputs = ROIOutputs(cfg) self.to(self.device) @classmethod def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs): config = kwargs.pop("config", None) state_dict = kwargs.pop("state_dict", None) cache_dir = kwargs.pop("cache_dir", None) from_tf = kwargs.pop("from_tf", False) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) use_cdn = kwargs.pop("use_cdn", True) # Load config if we don't provide a configuration if not isinstance(config, Config): config_path = config if config is not None else pretrained_model_name_or_path # try: config = 
Config.from_pretrained( config_path, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, local_files_only=local_files_only, ) # Load model if pretrained_model_name_or_path is not None: if os.path.isdir(pretrained_model_name_or_path): if os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)): # Load from a PyTorch checkpoint archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME) else: raise EnvironmentError( "Error no file named {} found in directory {} ".format( WEIGHTS_NAME, pretrained_model_name_or_path, ) ) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): archive_file = pretrained_model_name_or_path elif os.path.isfile(pretrained_model_name_or_path + ".index"): assert ( from_tf ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format( pretrained_model_name_or_path + ".index" ) archive_file = pretrained_model_name_or_path + ".index" else: archive_file = hf_bucket_url( pretrained_model_name_or_path, filename=WEIGHTS_NAME, use_cdn=use_cdn, ) try: # Load from URL or cache if already cached resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) if resolved_archive_file is None: raise EnvironmentError except EnvironmentError: msg = f"Can't load weights for '{pretrained_model_name_or_path}'." raise EnvironmentError(msg) if resolved_archive_file == archive_file: print("loading weights file {}".format(archive_file)) else: print("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file)) else: resolved_archive_file = None # Instantiate model. model = cls(config) if state_dict is None: try: try: state_dict = torch.load(resolved_archive_file, map_location="cpu") except Exception: state_dict = load_checkpoint(resolved_archive_file) except Exception: raise OSError( "Unable to load weights from pytorch checkpoint file. " "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. 
" ) missing_keys = [] unexpected_keys = [] error_msgs = [] # Convert old format to new format if needed from a PyTorch state_dict old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if "gamma" in key: new_key = key.replace("gamma", "weight") if "beta" in key: new_key = key.replace("beta", "bias") if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, "_metadata", None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata model_to_load = model model_to_load.load_state_dict(state_dict) if model.__class__.__name__ != model_to_load.__class__.__name__: base_model_state_dict = model_to_load.state_dict().keys() head_model_state_dict_without_base_prefix = [ key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys() ] missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict) if len(unexpected_keys) > 0: print( f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when" f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are" f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or" " with another architecture (e.g. initializing a BertForSequenceClassification model from a" " BertForPreTraining model).\n- This IS NOT expected if you are initializing" f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical" " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)." ) else: print(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n") if len(missing_keys) > 0: print( f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at" f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably" " TRAIN this model on a down-stream task to be able to use it for predictions and inference." ) else: print( f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at" f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint" f" was trained on, you can already use {model.__class__.__name__} for predictions without further" " training." 
) if len(error_msgs) > 0: raise RuntimeError( "Error(s) in loading state_dict for {}:\n\t{}".format( model.__class__.__name__, "\n\t".join(error_msgs) ) ) # Set model in evaluation mode to deactivate DropOut modules by default model.eval() return model def forward( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): """ kwargs: max_detections (int), return_tensors {"np", "pt", None}, padding {None, "max_detections"}, pad_value (int), location = {"cuda", "cpu"} """ if self.training: raise NotImplementedError() return self.inference( images=images, image_shapes=image_shapes, gt_boxes=gt_boxes, proposals=proposals, scales_yx=scales_yx, **kwargs, ) @torch.no_grad() def inference( self, images, image_shapes, gt_boxes=None, proposals=None, scales_yx=None, **kwargs, ): # run images through backbone original_sizes = image_shapes * scales_yx features = self.backbone(images) # generate proposals if none are available if proposals is None: proposal_boxes, _ = self.proposal_generator(images, image_shapes, features, gt_boxes) else: assert proposals is not None # pool object features from either gt_boxes, or from proposals obj_logits, attr_logits, box_deltas, feature_pooled = self.roi_heads(features, proposal_boxes, gt_boxes) # prepare FRCNN Outputs and select top proposals boxes, classes, class_probs, attrs, attr_probs, roi_features = self.roi_outputs( obj_logits=obj_logits, attr_logits=attr_logits, box_deltas=box_deltas, pred_boxes=proposal_boxes, features=feature_pooled, sizes=image_shapes, scales=scales_yx, ) # will we pad??? subset_kwargs = { "max_detections": kwargs.get("max_detections", None), "return_tensors": kwargs.get("return_tensors", None), "pad_value": kwargs.get("pad_value", 0), "padding": kwargs.get("padding", None), } preds_per_image = torch.tensor([p.size(0) for p in boxes]) boxes = pad_list_tensors(boxes, preds_per_image, **subset_kwargs) classes = pad_list_tensors(classes, preds_per_image, **subset_kwargs) class_probs = pad_list_tensors(class_probs, preds_per_image, **subset_kwargs) attrs = pad_list_tensors(attrs, preds_per_image, **subset_kwargs) attr_probs = pad_list_tensors(attr_probs, preds_per_image, **subset_kwargs) roi_features = pad_list_tensors(roi_features, preds_per_image, **subset_kwargs) subset_kwargs["padding"] = None preds_per_image = pad_list_tensors(preds_per_image, None, **subset_kwargs) sizes = pad_list_tensors(image_shapes, None, **subset_kwargs) normalized_boxes = norm_box(boxes, original_sizes) return OrderedDict( { "obj_ids": classes, "obj_probs": class_probs, "attr_ids": attrs, "attr_probs": attr_probs, "boxes": boxes, "sizes": sizes, "preds_per_image": preds_per_image, "roi_features": roi_features, "normalized_boxes": normalized_boxes, } )
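# Rough usage sketch (illustrative, not part of the original file). The checkpoint name and the
# Config / Preprocess helpers are assumptions borrowed from this example's demo (utils.py and
# processing_image.py); "test.jpg" is a placeholder path.
if __name__ == "__main__":
    from processing_image import Preprocess
    from utils import Config

    frcnn_cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
    frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=frcnn_cfg)
    image_preprocess = Preprocess(frcnn_cfg)
    images, sizes, scales_yx = image_preprocess("test.jpg")
    output_dict = frcnn(
        images,
        sizes,
        scales_yx=scales_yx,
        padding="max_detections",
        max_detections=frcnn_cfg.max_detections,
        return_tensors="pt",
    )
    roi_features = output_dict.get("roi_features")  # pooled region features for downstream models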
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/visualizing_image.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import colorsys import io import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import torch from matplotlib.backends.backend_agg import FigureCanvasAgg from utils import img_tensorize _SMALL_OBJ = 1000 class SingleImageViz: def __init__( self, img, scale=1.2, edgecolor="g", alpha=0.5, linestyle="-", saveas="test_out.jpg", rgb=True, pynb=False, id2obj=None, id2attr=None, pad=0.7, ): """ img: an RGB image of shape (H, W, 3). """ if isinstance(img, torch.Tensor): img = img.numpy().astype("np.uint8") if isinstance(img, str): img = img_tensorize(img) assert isinstance(img, np.ndarray) width, height = img.shape[1], img.shape[0] fig = mplfigure.Figure(frameon=False) dpi = fig.get_dpi() width_in = (width * scale + 1e-2) / dpi height_in = (height * scale + 1e-2) / dpi fig.set_size_inches(width_in, height_in) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") ax.set_xlim(0.0, width) ax.set_ylim(height) self.saveas = saveas self.rgb = rgb self.pynb = pynb self.img = img self.edgecolor = edgecolor self.alpha = 0.5 self.linestyle = linestyle self.font_size = int(np.sqrt(min(height, width)) * scale // 3) self.width = width self.height = height self.scale = scale self.fig = fig self.ax = ax self.pad = pad self.id2obj = id2obj self.id2attr = id2attr self.canvas = FigureCanvasAgg(fig) def add_box(self, box, color=None): if color is None: color = self.edgecolor (x0, y0, x1, y1) = box width = x1 - x0 height = y1 - y0 self.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=color, linewidth=self.font_size // 3, alpha=self.alpha, linestyle=self.linestyle, ) ) def draw_boxes(self, boxes, obj_ids=None, obj_scores=None, attr_ids=None, attr_scores=None): if len(boxes.shape) > 2: boxes = boxes[0] if len(obj_ids.shape) > 1: obj_ids = obj_ids[0] if len(obj_scores.shape) > 1: obj_scores = obj_scores[0] if len(attr_ids.shape) > 1: attr_ids = attr_ids[0] if len(attr_scores.shape) > 1: attr_scores = attr_scores[0] if isinstance(boxes, torch.Tensor): boxes = boxes.numpy() if isinstance(boxes, list): boxes = np.array(boxes) assert isinstance(boxes, np.ndarray) areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) sorted_idxs = np.argsort(-areas).tolist() boxes = boxes[sorted_idxs] if boxes is not None else None obj_ids = obj_ids[sorted_idxs] if obj_ids is not None else None obj_scores = obj_scores[sorted_idxs] if obj_scores is not None else None attr_ids = attr_ids[sorted_idxs] if attr_ids is not None else None attr_scores = attr_scores[sorted_idxs] if attr_scores is not None else None assigned_colors = [self._random_color(maximum=1) for _ in range(len(boxes))] assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] if obj_ids is not None: labels = self._create_text_labels_attr(obj_ids, obj_scores, attr_ids, attr_scores) for i in range(len(boxes)): color = assigned_colors[i] 
self.add_box(boxes[i], color) self.draw_labels(labels[i], boxes[i], color) def draw_labels(self, label, box, color): x0, y0, x1, y1 = box text_pos = (x0, y0) instance_area = (y1 - y0) * (x1 - x0) small = _SMALL_OBJ * self.scale if instance_area < small or y1 - y0 < 40 * self.scale: if y1 >= self.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.height * self.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) font_size *= 0.75 * self.font_size self.draw_text( text=label, position=text_pos, color=lighter_color, ) def draw_text( self, text, position, color="g", ha="left", ): rotation = 0 font_size = self.font_size color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) bbox = { "facecolor": "black", "alpha": self.alpha, "pad": self.pad, "edgecolor": "none", } x, y = position self.ax.text( x, y, text, size=font_size * self.scale, family="sans-serif", bbox=bbox, verticalalignment="top", horizontalalignment=ha, color=color, zorder=10, rotation=rotation, ) def save(self, saveas=None): if saveas is None: saveas = self.saveas if saveas.lower().endswith(".jpg") or saveas.lower().endswith(".png"): cv2.imwrite( saveas, self._get_buffer()[:, :, ::-1], ) else: self.fig.savefig(saveas) def _create_text_labels_attr(self, classes, scores, attr_classes, attr_scores): labels = [self.id2obj[i] for i in classes] attr_labels = [self.id2attr[i] for i in attr_classes] labels = [ f"{label} {score:.2f} {attr} {attr_score:.2f}" for label, score, attr, attr_score in zip(labels, scores, attr_labels, attr_scores) ] return labels def _create_text_labels(self, classes, scores): labels = [self.id2obj[i] for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(li, s * 100) for li, s in zip(labels, scores)] return labels def _random_color(self, maximum=255): idx = np.random.randint(0, len(_COLORS)) ret = _COLORS[idx] * maximum if not self.rgb: ret = ret[::-1] return ret def _get_buffer(self): if not self.pynb: s, (width, height) = self.canvas.print_to_buffer() if (width, height) != (self.width, self.height): img = cv2.resize(self.img, (width, height)) else: img = self.img else: buf = io.BytesIO() # works for cairo backend self.canvas.print_rgba(buf) width, height = self.width, self.height s = buf.getvalue() img = self.img buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) try: import numexpr as ne # fuse them with numexpr visualized_image = ne.evaluate("img * (1 - alpha / 255.0) + rgb * (alpha / 255.0)") except ImportError: alpha = alpha.astype("float32") / 255.0 visualized_image = img * (1 - alpha) + rgb * alpha return visualized_image.astype("uint8") def _change_color_brightness(self, color, brightness_factor): assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return modified_color # Color map _COLORS = ( np.array( [ 0.000, 0.447, 0.741, 0.850, 0.325, 0.098, 
0.929, 0.694, 0.125, 0.494, 0.184, 0.556, 0.466, 0.674, 0.188, 0.301, 0.745, 0.933, 0.635, 0.078, 0.184, 0.300, 0.300, 0.300, 0.600, 0.600, 0.600, 1.000, 0.000, 0.000, 1.000, 0.500, 0.000, 0.749, 0.749, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 1.000, 0.667, 0.000, 1.000, 0.333, 0.333, 0.000, 0.333, 0.667, 0.000, 0.333, 1.000, 0.000, 0.667, 0.333, 0.000, 0.667, 0.667, 0.000, 0.667, 1.000, 0.000, 1.000, 0.333, 0.000, 1.000, 0.667, 0.000, 1.000, 1.000, 0.000, 0.000, 0.333, 0.500, 0.000, 0.667, 0.500, 0.000, 1.000, 0.500, 0.333, 0.000, 0.500, 0.333, 0.333, 0.500, 0.333, 0.667, 0.500, 0.333, 1.000, 0.500, 0.667, 0.000, 0.500, 0.667, 0.333, 0.500, 0.667, 0.667, 0.500, 0.667, 1.000, 0.500, 1.000, 0.000, 0.500, 1.000, 0.333, 0.500, 1.000, 0.667, 0.500, 1.000, 1.000, 0.500, 0.000, 0.333, 1.000, 0.000, 0.667, 1.000, 0.000, 1.000, 1.000, 0.333, 0.000, 1.000, 0.333, 0.333, 1.000, 0.333, 0.667, 1.000, 0.333, 1.000, 1.000, 0.667, 0.000, 1.000, 0.667, 0.333, 1.000, 0.667, 0.667, 1.000, 0.667, 1.000, 1.000, 1.000, 0.000, 1.000, 1.000, 0.333, 1.000, 1.000, 0.667, 1.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.167, 0.000, 0.000, 0.333, 0.000, 0.000, 0.500, 0.000, 0.000, 0.667, 0.000, 0.000, 0.833, 0.000, 0.000, 1.000, 0.000, 0.000, 0.000, 0.143, 0.143, 0.143, 0.857, 0.857, 0.857, 1.000, 1.000, 1.000, ] ) .astype(np.float32) .reshape(-1, 3) )
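# Rough usage sketch (illustrative, not part of the original file); `output_dict` is assumed to be
# the OrderedDict returned by GeneralizedRCNN.inference in this example's modeling_frcnn.py, and the
# object / attribute vocabularies can come from e.g. utils.load_labels().
#
#   objids, attrids = load_labels()
#   viz = SingleImageViz("test.jpg", id2obj=objids, id2attr=attrids)
#   viz.draw_boxes(
#       output_dict.get("boxes"),
#       output_dict.pop("obj_ids"),
#       output_dict.pop("obj_probs"),
#       output_dict.pop("attr_ids"),
#       output_dict.pop("attr_probs"),
#   )
#   viz.save("test_out.jpg")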
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/lxmert/utils.py
""" coding=utf-8 Copyright 2018, Antonio Mendoza Hao Tan, Mohit Bansal, Huggingface team :) Adapted From Facebook Inc, Detectron2 Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.import copy """ import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import sha256 from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cv2 import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch _torch_available = True except ImportError: _torch_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) default_cache_path = os.path.join(torch_cache_home, "transformers") CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1]) CONFIG = os.path.join(PATH, "config.yaml") ATTRIBUTES = os.path.join(PATH, "attributes.txt") OBJECTS = os.path.join(PATH, "objects.txt") PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) WEIGHTS_NAME = "pytorch_model.bin" CONFIG_NAME = "config.yaml" def load_labels(objs=OBJECTS, attrs=ATTRIBUTES): vg_classes = [] with open(objs) as f: for object in f.readlines(): vg_classes.append(object.split(",")[0].lower().strip()) vg_attrs = [] with open(attrs) as f: for object in f.readlines(): vg_attrs.append(object.split(",")[0].lower().strip()) return vg_classes, vg_attrs def load_checkpoint(ckp): r = OrderedDict() with open(ckp, "rb") as f: ckp = pkl.load(f)["model"] for k in copy.deepcopy(list(ckp.keys())): v = ckp.pop(k) if isinstance(v, np.ndarray): v = torch.tensor(v) else: assert isinstance(v, torch.tensor), type(v) r[k] = v return r class Config: _pointer = {} def __init__(self, dictionary: dict, name: str = "root", level=0): self._name = name self._level = level d = {} for k, v in dictionary.items(): if v is None: raise ValueError() k = copy.deepcopy(k) v = copy.deepcopy(v) if isinstance(v, dict): v = Config(v, name=k, level=level + 1) d[k] = v setattr(self, k, v) self._pointer = d def __repr__(self): return str(list((self._pointer.keys()))) def __setattr__(self, key, val): self.__dict__[key] = val self.__dict__[key.upper()] = val levels = key.split(".") last_level = len(levels) - 1 pointer = self._pointer if len(levels) > 1: for i, l in enumerate(levels): if hasattr(self, l) and isinstance(getattr(self, l), 
Config): setattr(getattr(self, l), ".".join(levels[i:]), val) if l == last_level: pointer[l] = val else: pointer = pointer[l] def to_dict(self): return self._pointer def dump_yaml(self, data, file_name): with open(f"{file_name}", "w") as stream: dump(data, stream) def dump_json(self, data, file_name): with open(f"{file_name}", "w") as stream: json.dump(data, stream) @staticmethod def load_yaml(config): with open(config) as stream: data = load(stream, Loader=Loader) return data def __str__(self): t = " " if self._name != "root": r = f"{t * (self._level-1)}{self._name}:\n" else: r = "" level = self._level for i, (k, v) in enumerate(self._pointer.items()): if isinstance(v, Config): r += f"{t * (self._level)}{v}\n" self._level += 1 else: r += f"{t * (self._level)}{k}: {v} ({type(v).__name__})\n" self._level = level return r[:-1] @classmethod def from_pretrained(cls, pretrained_model_name_or_path: str, **kwargs): config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs) return cls(config_dict) @classmethod def get_config_dict(cls, pretrained_model_name_or_path: str, **kwargs): cache_dir = kwargs.pop("cache_dir", None) force_download = kwargs.pop("force_download", False) resume_download = kwargs.pop("resume_download", False) proxies = kwargs.pop("proxies", None) local_files_only = kwargs.pop("local_files_only", False) if os.path.isdir(pretrained_model_name_or_path): config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME) elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path): config_file = pretrained_model_name_or_path else: config_file = hf_bucket_url(pretrained_model_name_or_path, filename=CONFIG_NAME, use_cdn=False) try: # Load from URL or cache if already cached resolved_config_file = cached_path( config_file, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, ) # Load config dict if resolved_config_file is None: raise EnvironmentError config_file = Config.load_yaml(resolved_config_file) except EnvironmentError: msg = "Can't load config for" raise EnvironmentError(msg) if resolved_config_file == config_file: print("loading configuration file from path") else: print("loading configuration file cache") return Config.load_yaml(resolved_config_file), kwargs # quick compare tensors def compare(in_tensor): out_tensor = torch.load("dump.pt", map_location=in_tensor.device) n1 = in_tensor.numpy() n2 = out_tensor.numpy()[0] print(n1.shape, n1[0, 0, :5]) print(n2.shape, n2[0, 0, :5]) assert np.allclose(n1, n2, rtol=0.01, atol=0.1), ( f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %" " element-wise mismatch" ) raise Exception("tensors are all good") # Hugging face functions below def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX legacy_format = "/" not in model_id if legacy_format: return f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def http_get( url, temp_file, proxies=None, resume_size=0, user_agent=None, ): ua = "python/{}".format(sys.version.split()[0]) if _torch_available: ua += "; torch/{}".format(torch.__version__) if isinstance(user_agent, dict): ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items()) elif 
isinstance(user_agent, str): ua += "; " + user_agent headers = {"user-agent": ua} if resume_size > 0: headers["Range"] = "bytes=%d-" % (resume_size,) response = requests.get(url, stream=True, proxies=proxies, headers=headers) if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None progress = tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", ) for chunk in response.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent=None, local_files_only=False, ): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) etag = None if not local_files_only: try: response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) if response.status_code == 200: etag = response.headers.get("ETag") except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(cache_path): return cache_path else: matching_files = [ file for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*") if not file.endswith(".json") and not file.endswith(".lock") ] if len(matching_files) > 0: return os.path.join(cache_dir, matching_files[-1]) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # If the download just completed while the lock was activated. if os.path.exists(cache_path) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: incomplete_path = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(incomplete_path, "a+b") as f: yield f temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) resume_size = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
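        # temp_file_manager yields either a fresh NamedTemporaryFile or, when resuming, the partially
        # downloaded "<cache_path>.incomplete" file opened in append mode.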
with temp_file_manager() as temp_file: print( "%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name, ) http_get( url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent, ) os.replace(temp_file.name, cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: json.dump(meta, meta_file) return cache_path def url_to_filename(url, etag=None): url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".h5"): filename += ".h5" return filename def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent=None, extract_compressed_file=False, force_extract=False, local_files_only=False, ): if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' 
        # in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path


def get_data(query, delim=","):
    assert isinstance(query, str)
    if os.path.isfile(query):
        with open(query) as f:
            data = eval(f.read())
    else:
        req = requests.get(query)
        try:
            data = req.json()
        except Exception:
            data = req.content.decode()
            assert data is not None, "could not connect"
            try:
                data = eval(data)
            except Exception:
                data = data.split("\n")
        req.close()
    return data


def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img


# to load legacy frcnn checkpoint from detectron
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new


def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")


def img_tensorize(im, input_format="RGB"):
    assert isinstance(im, str)
    if os.path.isfile(im):
        img = cv2.imread(im)
    else:
        img = get_image_from_url(im)
        assert img is not None, f"could not connect to: {im}"
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if input_format == "RGB":
        img = img[:, :, ::-1]
    return img


def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
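# Rough usage sketch (illustrative, not part of the original file); the checkpoint name is an
# assumption taken from this example's demo, and objects.txt / attributes.txt are expected to sit
# next to this module.
if __name__ == "__main__":
    cfg = Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")  # downloads and parses config.yaml
    print(cfg)
    objids, attrids = load_labels()  # Visual Genome object / attribute vocabularies
    print(f"{len(objids)} object classes, {len(attrids)} attribute classes")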
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/zero-shot-distillation/distill_classifier.py
import logging import os import sys from dataclasses import dataclass, field from typing import List, Optional import torch from datasets import Dataset from torch import nn from tqdm.auto import tqdm from transformers import ( AutoModelForSequenceClassification, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, set_seed, utils, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process DESCRIPTION = """ Distills an NLI-based zero-shot classifier to a smaller, more efficient model with a fixed set of candidate class names. Useful for speeding up zero-shot classification in cases where labeled training data is not available, but when only a single fixed set of classes is needed. Takes a teacher NLI model, student classifier model, unlabeled dataset, and set of K possible class names. Yields a single classifier with K outputs corresponding to the provided class names. """ logger = logging.getLogger(__name__) @dataclass class TeacherModelArguments: teacher_name_or_path: Optional[str] = field( default="roberta-large-mnli", metadata={"help": "The NLI/zero-shot teacher model to be distilled."} ) hypothesis_template: Optional[str] = field( default="This example is {}.", metadata={ "help": ( "Template used to turn class names into mock hypotheses for teacher NLI model. Must include {{}}" "where class name is inserted." ) }, ) teacher_batch_size: Optional[int] = field( default=32, metadata={"help": "Batch size for generating teacher predictions."} ) multi_label: Optional[bool] = field( default=False, metadata={ "help": ( "Allow multiple classes to be true rather than forcing them to sum to 1 (sometimes called" "multi-class multi-label classification)." ) }, ) temperature: Optional[float] = field( default=1.0, metadata={"help": "Temperature applied to teacher softmax for distillation."} ) @dataclass class StudentModelArguments: student_name_or_path: Optional[str] = field( default="distilbert-base-uncased", metadata={"help": "The NLI/zero-shot teacher model to be distilled."} ) @dataclass class DataTrainingArguments: data_file: str = field(metadata={"help": "Text file with one unlabeled instance per line."}) class_names_file: str = field(metadata={"help": "Text file with one class name per line."}) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the Rust tokenizers library) or not."}, ) @dataclass class DistillTrainingArguments(TrainingArguments): output_dir: Optional[str] = field( default=None, metadata={"help": "The output directory where the model predictions and checkpoints will be written."}, ) per_device_train_batch_size: int = field( default=32, metadata={"help": "Batch size per GPU/TPU core/CPU for training."} ) per_device_eval_batch_size: int = field( default=128, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."} ) num_train_epochs: float = field(default=1.0, metadata={"help": "Total number of training epochs to perform."}) do_train: bool = field(default=True, metadata={"help": "Whether to run training of student model."}) do_eval: bool = field( default=True, metadata={ "help": ( "Whether to evaluate the agreement of the final student predictions and the teacher predictions" "after training." ) }, ) save_total_limit: Optional[int] = field( default=0, metadata={ "help": ( "Limit the total amount of checkpoints." "Deletes the older checkpoints in the output_dir. Default is 0 (no checkpoints)." 
) }, ) class DistillationTrainer(Trainer): def compute_loss(self, model, inputs, return_outputs=False): target_p = inputs["labels"] outputs = model(inputs["input_ids"], attention_mask=inputs["attention_mask"]) logits = outputs[0] loss = -torch.sum(target_p * logits.log_softmax(dim=-1), axis=-1).mean() if return_outputs: return loss, outputs return loss def read_lines(path): lines = [] with open(path, "r") as f: for line in f: line = line.strip() if len(line) > 0: lines.append(line) return lines def get_premise_hypothesis_pairs(examples, class_names, hypothesis_template): premises = [] hypotheses = [] for example in examples: for name in class_names: premises.append(example) hypotheses.append(hypothesis_template.format(name)) return premises, hypotheses def get_entailment_id(config): for label, ind in config.label2id.items(): if label.lower().startswith("entail"): return ind logger.warning("Could not identify entailment dimension from teacher config label2id. Setting to -1.") return -1 def get_teacher_predictions( model_path: str, examples: List[str], class_names: List[str], hypothesis_template: str, batch_size: int, temperature: float, multi_label: bool, use_fast_tokenizer: bool, no_cuda: bool, fp16: bool, ): """ Gets predictions by the same method as the zero-shot pipeline but with DataParallel & more efficient batching """ model = AutoModelForSequenceClassification.from_pretrained(model_path) model_config = model.config if not no_cuda and torch.cuda.is_available(): model = nn.DataParallel(model.cuda()) batch_size *= len(model.device_ids) tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=use_fast_tokenizer) premises, hypotheses = get_premise_hypothesis_pairs(examples, class_names, hypothesis_template) logits = [] for i in tqdm(range(0, len(premises), batch_size)): batch_premises = premises[i : i + batch_size] batch_hypotheses = hypotheses[i : i + batch_size] encodings = tokenizer( batch_premises, batch_hypotheses, padding=True, truncation="only_first", return_tensors="pt", ) with torch.cuda.amp.autocast(enabled=fp16): with torch.no_grad(): outputs = model(**encodings) logits.append(outputs.logits.detach().cpu().float()) entail_id = get_entailment_id(model_config) contr_id = -1 if entail_id == 0 else 0 logits = torch.cat(logits, dim=0) # N*K x 3 nli_logits = logits.reshape(len(examples), len(class_names), -1)[..., [contr_id, entail_id]] # N x K x 2 if multi_label: # softmax over (contr, entail) logits for each class independently nli_prob = (nli_logits / temperature).softmax(-1) else: # softmax over entail logits across classes s.t. class probabilities sum to 1. nli_prob = (nli_logits / temperature).softmax(1) return nli_prob[..., 1] # N x K def main(): parser = HfArgumentParser( (DataTrainingArguments, TeacherModelArguments, StudentModelArguments, DistillTrainingArguments), description=DESCRIPTION, ) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. data_args, teacher_args, student_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1]) ) else: data_args, teacher_args, student_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): utils.logging.set_verbosity_info() utils.logging.enable_default_handler() utils.logging.enable_explicit_format() if training_args.local_rank != -1: raise ValueError("Distributed training is not currently supported.") if training_args.tpu_num_cores is not None: raise ValueError("TPU acceleration is not currently supported.") logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # 1. read in data examples = read_lines(data_args.data_file) class_names = read_lines(data_args.class_names_file) # 2. get teacher predictions and load into dataset logger.info("Generating predictions from zero-shot teacher model") teacher_soft_preds = get_teacher_predictions( teacher_args.teacher_name_or_path, examples, class_names, teacher_args.hypothesis_template, teacher_args.teacher_batch_size, teacher_args.temperature, teacher_args.multi_label, data_args.use_fast_tokenizer, training_args.no_cuda, training_args.fp16, ) dataset = Dataset.from_dict( { "text": examples, "labels": teacher_soft_preds, } ) # 3. create student logger.info("Initializing student model") model = AutoModelForSequenceClassification.from_pretrained( student_args.student_name_or_path, num_labels=len(class_names) ) tokenizer = AutoTokenizer.from_pretrained(student_args.student_name_or_path, use_fast=data_args.use_fast_tokenizer) model.config.id2label = dict(enumerate(class_names)) model.config.label2id = {label: i for i, label in enumerate(class_names)} # 4. 
train student on teacher predictions dataset = dataset.map(tokenizer, input_columns="text") dataset.set_format("torch") def compute_metrics(p, return_outputs=False): preds = p.predictions.argmax(-1) proxy_labels = p.label_ids.argmax(-1) # "label_ids" are actually distributions return {"agreement": (preds == proxy_labels).mean().item()} trainer = DistillationTrainer( model=model, tokenizer=tokenizer, args=training_args, train_dataset=dataset, compute_metrics=compute_metrics, ) if training_args.do_train: logger.info("Training student model on teacher predictions") trainer.train() if training_args.do_eval: agreement = trainer.evaluate(eval_dataset=dataset)["eval_agreement"] logger.info(f"Agreement of student and teacher predictions: {agreement * 100:0.2f}%") trainer.save_model() if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/zero-shot-distillation/README.md
# Zero-shot classifier distillation

Author: @joeddav

This script provides a way to improve the speed and memory performance of a zero-shot classifier by training a more efficient student model from the zero-shot teacher's predictions over an unlabeled dataset.

The zero-shot classification pipeline uses a model pre-trained on natural language inference (NLI) to determine the compatibility of a set of candidate class names with a given sequence. This serves as a convenient out-of-the-box classifier without the need for labeled training data. However, for a given sequence, the method requires each possible label to be fed through the large NLI model separately. Thus for `N` sequences and `K` classes, a total of `N*K` forward passes through the model are required. This requirement slows inference considerably, particularly as `K` grows.

Given (1) an unlabeled corpus and (2) a set of candidate class names, the provided script trains a student model with a standard classification head with `K` output dimensions. The resulting student model can then be used for classifying novel text instances with a significant boost in speed and memory performance while retaining similar classification performance to the original zero-shot model.

### Usage

A teacher NLI model can be distilled to a more efficient student model by running [`distill_classifier.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/zero-shot-distillation/distill_classifier.py):

```
python distill_classifier.py \
--data_file <unlabeled_data.txt> \
--class_names_file <class_names.txt> \
--output_dir <output_dir>
```

`<unlabeled_data.txt>` should be a text file with a single unlabeled example per line. `<class_names.txt>` is a text file with one class name per line.

Other optional arguments include:

- `--teacher_name_or_path` (default: `roberta-large-mnli`): The name or path of the NLI teacher model.
- `--student_name_or_path` (default: `distilbert-base-uncased`): The name or path of the student model which will be fine-tuned to copy the teacher predictions.
- `--hypothesis_template` (default: `"This example is {}."`): The template used to turn each label into an NLI-style hypothesis when generating teacher predictions. This template must include a `{}` or similar syntax for the candidate label to be inserted into the template. For example, the default template is `"This example is {}."` With the candidate label `sports`, this would be fed into the model like `[CLS] sequence to classify [SEP] This example is sports . [SEP]`.
- `--multi_label`: Whether or not multiple candidate labels can be true. By default, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If `--multi_label` is passed, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score. This is sometimes called "multi-class multi-label" classification.
- `--temperature` (default: `1.0`): The temperature applied to the softmax of the teacher model predictions. A higher temperature results in a student with smoother (lower confidence) predictions than the teacher, while a value `<1` results in a higher-confidence, peaked distribution. The default `1.0` is equivalent to no smoothing.
- `--teacher_batch_size` (default: `32`): The batch size used for generating a single set of teacher predictions. Does not affect training. Use `--per_device_train_batch_size` to change the training batch size.
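To make the `N*K` expansion concrete, the short sketch below mirrors the `get_premise_hypothesis_pairs` helper in `distill_classifier.py`: every unlabeled example is paired with a hypothesis built from each candidate class name before being scored by the teacher. The class names and sentences are made up purely for illustration.

```python
# Illustration of how each example becomes one NLI input per candidate class.
# The class names and sentences below are hypothetical.
class_names = ["sports", "business"]
hypothesis_template = "This example is {}."
examples = ["The match went to extra time.", "Shares fell sharply on Monday."]

premises, hypotheses = [], []
for example in examples:
    for name in class_names:
        premises.append(example)                             # premise repeated once per class
        hypotheses.append(hypothesis_template.format(name))  # e.g. "This example is sports."

# 2 examples x 2 classes -> 4 premise/hypothesis pairs, i.e. 4 teacher forward passes
assert len(premises) == len(examples) * len(class_names)
```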
Any of the arguments in the 🤗 Trainer's [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.html?#trainingarguments) can also be modified, such as `--learning_rate`, `--fp16`, `--no_cuda`, `--warmup_steps`, etc. Run `python distill_classifier.py -h` for a full list of available arguments or consult the [Trainer documentation](https://huggingface.co/transformers/main_classes/trainer.html#trainingarguments).

> **Note**: Distributed and TPU training are not currently supported. Single-node multi-GPU is supported, however, and will run automatically if multiple GPUs are available.

### Example: Topic classification

> A full colab demo notebook of this example can be found [here](https://colab.research.google.com/drive/1mjBjd0cR8G57ZpsnFCS3ngGyo5nCa9ya?usp=sharing).

Let's say we're interested in classifying news articles into one of four topic categories: "the world", "sports", "business", or "science/tech". We have an unlabeled dataset, [AG's News](https://huggingface.co/datasets/ag_news), which corresponds to this problem (in reality AG's News is annotated, but we will pretend it is not for the sake of example).

We can use an NLI model like `roberta-large-mnli` for zero-shot classification like so:

```python
>>> class_names = ["the world", "sports", "business", "science/tech"]
>>> hypothesis_template = "This text is about {}."
>>> sequence = "A new moon has been discovered in Jupiter's orbit"

>>> zero_shot_classifier = pipeline("zero-shot-classification", model="roberta-large-mnli")
>>> zero_shot_classifier(sequence, class_names, hypothesis_template=hypothesis_template)
{'sequence': "A new moon has been discovered in Jupiter's orbit",
 'labels': ['science/tech', 'the world', 'business', 'sports'],
 'scores': [0.7035840153694153, 0.18744826316833496, 0.06027870625257492, 0.04868902638554573]}
```

Unfortunately, inference is slow since each of our 4 class names must be fed through the large model for every sequence to be classified. But with our unlabeled data we can distill the model to a small distilbert classifier to make future inference much faster.

To run the script, we will need to put each training example (text only) from AG's News on its own line in `agnews/train_unlabeled.txt`, and each of the four class names in the newline-separated `agnews/class_names.txt`. Then we can run distillation with the following command:

```bash
python distill_classifier.py \
--data_file ./agnews/train_unlabeled.txt \
--class_names_file ./agnews/class_names.txt \
--teacher_name_or_path roberta-large-mnli \
--hypothesis_template "This text is about {}." \
--output_dir ./agnews/distilled
```

The script will generate a set of soft zero-shot predictions from `roberta-large-mnli` for each example in `agnews/train_unlabeled.txt`. It will then train a student distilbert classifier on the teacher predictions and save the resulting model in `./agnews/distilled`.
The resulting model can then be loaded and used like any other pre-trained classifier:

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer
model = AutoModelForSequenceClassification.from_pretrained("./agnews/distilled")
tokenizer = AutoTokenizer.from_pretrained("./agnews/distilled")
```

and even used trivially with a `TextClassificationPipeline`:

```python
>>> distilled_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer, return_all_scores=True)
>>> distilled_classifier(sequence)
[[{'label': 'the world', 'score': 0.14899294078350067},
  {'label': 'sports', 'score': 0.03205857425928116},
  {'label': 'business', 'score': 0.05943061783909798},
  {'label': 'science/tech', 'score': 0.7595179080963135}]]
```

> Tip: pass `device=0` when constructing a pipeline to run on a GPU

As we can see, the results of the student closely resemble those of the teacher despite never having seen this example during training.

Now let's do a quick & dirty speed comparison simulating 16K examples with a batch size of 16:

```python
for _ in range(1000):
    zero_shot_classifier([sequence] * 16, class_names)
# runs in 1m 23s on a single V100 GPU
```

```python
%%time
for _ in range(1000):
    distilled_classifier([sequence] * 16)
# runs in 10.3s on a single V100 GPU
```

As we can see, the distilled student model runs an order of magnitude faster than its teacher NLI model. This is also a setting where we only have `K=4` possible labels. The higher the number of classes for a given task, the more drastic the speedup will be, since the zero-shot teacher's complexity scales linearly with the number of classes.

Since we secretly have access to ground truth labels for AG's news, we can evaluate the accuracy of each model. The original zero-shot model `roberta-large-mnli` gets an accuracy of 69.3% on the held-out test set. After training a student on the unlabeled training set, the distilled model gets a similar score of 70.4%.

Lastly, you can share the distilled model with the community and/or use it with our inference API by [uploading it to the 🤗 Hub](https://huggingface.co/transformers/model_sharing.html). We've uploaded the distilled model from this example at [joeddav/distilbert-base-uncased-agnews-student](https://huggingface.co/joeddav/distilbert-base-uncased-agnews-student).
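Since we do have the gold AG's News labels, the accuracy comparison above can be reproduced with a short loop over the test split. The snippet below is only a sketch: it assumes the class names in `class_names.txt` were listed in the same order as the integer labels of the `ag_news` dataset, and it classifies one example at a time for clarity rather than speed.

```python
from datasets import load_dataset
from transformers import AutoModelForSequenceClassification, AutoTokenizer, TextClassificationPipeline

model = AutoModelForSequenceClassification.from_pretrained("./agnews/distilled")
tokenizer = AutoTokenizer.from_pretrained("./agnews/distilled")
classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)

test_set = load_dataset("ag_news", split="test")
correct = 0
for example in test_set:
    # the pipeline returns [{"label": ..., "score": ...}]; map the name back to its integer id
    predicted_label = classifier(example["text"])[0]["label"]
    correct += int(model.config.label2id[predicted_label] == example["label"])

print(f"Student accuracy: {correct / len(test_set):.1%}")
```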
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/xtreme-s/requirements.txt
datasets >= 1.18.0
torch >= 1.5
torchaudio
librosa
jiwer
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/xtreme-s/README.md
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# XTREME-S benchmark examples

*Maintainers: [Anton Lozhkov](https://github.com/anton-l) and [Patrick von Platen](https://github.com/patrickvonplaten)*

The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers 102 typologically diverse languages and seven downstream tasks grouped in four families: speech recognition, translation, classification and retrieval.

XTREME-S covers speech recognition with Fleurs, Multilingual LibriSpeech (MLS) and VoxPopuli, speech translation with CoVoST-2, speech classification with LangID (Fleurs) and intent classification (Minds-14) and finally speech(-text) retrieval with Fleurs. Each of the tasks covers a subset of the 102 languages included in XTREME-S (shown here with their ISO 639-3 codes): afr, amh, ara, asm, ast, azj, bel, ben, bos, cat, ceb, ces, cmn, cym, dan, deu, ell, eng, spa, est, fas, ful, fin, tgl, fra, gle, glg, guj, hau, heb, hin, hrv, hun, hye, ind, ibo, isl, ita, jpn, jav, kat, kam, kea, kaz, khm, kan, kor, ckb, kir, ltz, lug, lin, lao, lit, luo, lav, mri, mkd, mal, mon, mar, msa, mlt, mya, nob, npi, nld, nso, nya, oci, orm, ory, pan, pol, pus, por, ron, rus, bul, snd, slk, slv, sna, som, srp, swe, swh, tam, tel, tgk, tha, tur, ukr, umb, urd, uzb, vie, wol, xho, yor, yue and zul.

Paper: [XTREME-S: Evaluating Cross-lingual Speech Representations](https://arxiv.org/abs/2203.10752)

Dataset: [https://huggingface.co/datasets/google/xtreme_s](https://huggingface.co/datasets/google/xtreme_s)

## Fine-tuning for the XTREME-S tasks

Based on the [`run_xtreme_s.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/xtreme-s/run_xtreme_s.py) script.

This script can fine-tune any of the pretrained speech models on the [hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition) on the [XTREME-S dataset](https://huggingface.co/datasets/google/xtreme_s) tasks.

XTREME-S is made up of 7 different tasks. Here is how to run the script on each of them:

```bash
export TASK_NAME=mls.all

python run_xtreme_s.py \
  --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
  --task="${TASK_NAME}" \
  --output_dir="xtreme_s_xlsr_${TASK_NAME}" \
  --num_train_epochs=100 \
  --per_device_train_batch_size=32 \
  --learning_rate="3e-4" \
  --target_column_name="transcription" \
  --save_steps=500 \
  --eval_steps=500 \
  --gradient_checkpointing \
  --fp16 \
  --group_by_length \
  --do_train \
  --do_eval \
  --do_predict \
  --push_to_hub
```

where `TASK_NAME` can be one of: `mls, voxpopuli, covost2, fleurs-asr, fleurs-lang_id, minds14`.

We get the following results on the test set of the benchmark's datasets.
The corresponding training commands for each dataset are given in the sections below:

| Task | Dataset | Result | Fine-tuned model & logs | Training time | GPUs |
|-----------------------|-----------|-----------------------|--------------------------------------------------------------------|---------------|--------|
| Speech Recognition | MLS | 30.33 WER | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_mls/) | 18:47:25 | 8xV100 |
| Speech Recognition | VoxPopuli | - | - | - | - |
| Speech Recognition | FLEURS | - | - | - | - |
| Speech Translation | CoVoST-2 | - | - | - | - |
| Speech Classification | Minds-14 | 90.15 F1 / 90.33 Acc. | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_minds14/) | 2:54:21 | 2xA100 |
| Speech Classification | FLEURS | - | - | - | - |
| Speech Retrieval | FLEURS | - | - | - | - |

### Speech Recognition with MLS

The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#multilingual-librispeech-mls) using 8 GPUs in half-precision.

```bash
python -m torch.distributed.launch \
  --nproc_per_node=8 \
  run_xtreme_s.py \
  --task="mls" \
  --language="all" \
  --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
  --output_dir="xtreme_s_xlsr_300m_mls" \
  --overwrite_output_dir \
  --num_train_epochs=100 \
  --per_device_train_batch_size=4 \
  --per_device_eval_batch_size=1 \
  --gradient_accumulation_steps=2 \
  --learning_rate="3e-4" \
  --warmup_steps=3000 \
  --evaluation_strategy="steps" \
  --max_duration_in_seconds=20 \
  --save_steps=500 \
  --eval_steps=500 \
  --logging_steps=1 \
  --layerdrop=0.0 \
  --mask_time_prob=0.3 \
  --mask_time_length=10 \
  --mask_feature_prob=0.1 \
  --mask_feature_length=64 \
  --freeze_feature_encoder \
  --gradient_checkpointing \
  --fp16 \
  --group_by_length \
  --do_train \
  --do_eval \
  --do_predict \
  --metric_for_best_model="wer" \
  --greater_is_better=False \
  --load_best_model_at_end \
  --push_to_hub
```

On 8 V100 GPUs, this script should run in ~19 hours and yield a cross-entropy loss of **0.6215** and a word error rate of **30.33**.

### Speech Classification with Minds-14

The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S Minds-14](https://huggingface.co/datasets/google/xtreme_s#intent-classification---minds-14) using 2 GPUs in half-precision.

```bash
python -m torch.distributed.launch \
  --nproc_per_node=2 \
  run_xtreme_s.py \
  --task="minds14" \
  --language="all" \
  --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
  --output_dir="xtreme_s_xlsr_300m_minds14" \
  --overwrite_output_dir \
  --num_train_epochs=50 \
  --per_device_train_batch_size=32 \
  --per_device_eval_batch_size=8 \
  --gradient_accumulation_steps=1 \
  --learning_rate="3e-4" \
  --warmup_steps=1500 \
  --evaluation_strategy="steps" \
  --max_duration_in_seconds=30 \
  --save_steps=200 \
  --eval_steps=200 \
  --logging_steps=1 \
  --layerdrop=0.0 \
  --mask_time_prob=0.3 \
  --mask_time_length=10 \
  --mask_feature_prob=0.1 \
  --mask_feature_length=64 \
  --freeze_feature_encoder \
  --gradient_checkpointing \
  --fp16 \
  --group_by_length \
  --do_train \
  --do_eval \
  --do_predict \
  --metric_for_best_model="f1" \
  --greater_is_better=True \
  --load_best_model_at_end \
  --push_to_hub
```

On 2 A100 GPUs, this script should run in ~3 hours and yield a cross-entropy loss of **0.4119** and an F1 score of **90.15**.
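As a quick sanity check after training, an ASR checkpoint produced by this script (for example the MLS model linked in the table above, `anton-l/xtreme_s_xlsr_300m_mls`) can be used for greedy CTC decoding. The snippet below is a rough sketch rather than part of the example: it assumes the checkpoint contains the processor files saved by `run_xtreme_s.py` and uses a silent placeholder clip in place of real 16 kHz audio.

```python
import numpy as np
import torch

from transformers import AutoModelForCTC, AutoProcessor

checkpoint = "anton-l/xtreme_s_xlsr_300m_mls"  # or a local output_dir produced by run_xtreme_s.py
processor = AutoProcessor.from_pretrained(checkpoint)
model = AutoModelForCTC.from_pretrained(checkpoint)

# Placeholder: one second of silence; replace with a real waveform sampled at 16 kHz.
audio = np.zeros(16_000, dtype=np.float32)

inputs = processor(audio, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids)[0])
```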
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/xtreme-s/run_xtreme_s.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """ Fine-tuning a 🤗 Transformers pretrained speech model on the XTREME-S benchmark tasks""" import json import logging import os import re import sys from collections import OrderedDict, defaultdict from dataclasses import dataclass, field from typing import Dict, List, Optional, Union import datasets import numpy as np import torch from datasets import DatasetDict, load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, AutoModelForCTC, AutoModelForSpeechSeq2Seq, AutoProcessor, AutoTokenizer, HfArgumentParser, Seq2SeqTrainer, Seq2SeqTrainingArguments, Trainer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.18.0.dev0") require_version("datasets>=1.18.0", "To fix: pip install -r examples/pytorch/speech-recognition/requirements.txt") logger = logging.getLogger(__name__) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) TASK_TO_TARGET_COLUMN_NAME = { "fleurs-asr": "transcription", "fleurs-lang_id": "lang_id", "mls": "transcription", "voxpopuli": "transcription", "covost2": "translation", "minds14": "intent_class", "babel": "transcription", } @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) tokenizer_name_or_path: Optional[str] = field( default=None, metadata={"help": "Path to pretrained tokenizer or tokenizer identifier from huggingface.co/models"}, ) cache_dir: Optional[str] = field( default=None, metadata={ "help": "Where do you want to store the pretrained models and datasets downloaded from huggingface.co" }, ) freeze_feature_encoder: bool = field( default=True, metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) attention_dropout: float = field( default=0.0, metadata={"help": "The dropout ratio for the attention probabilities."} ) activation_dropout: float = field( default=0.0, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} ) feat_proj_dropout: float = field(default=0.0, metadata={"help": "The dropout ratio for the projected features."}) hidden_dropout: float = field( default=0.0, metadata={ "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler." 
}, ) final_dropout: float = field( default=0.0, metadata={"help": "The dropout probability for the final projection layer."}, ) mask_time_prob: float = field( default=0.05, metadata={ "help": ( "Probability of each feature vector along the time axis to be chosen as the start of the vector" "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature" "vectors will be masked along the time axis." ) }, ) mask_time_length: int = field( default=10, metadata={"help": "Length of vector span to mask along the time axis."}, ) mask_feature_prob: float = field( default=0.0, metadata={ "help": ( "Probability of each feature vector along the feature axis to be chosen as the start of the vectorspan" " to be masked. Approximately ``mask_feature_prob * sequence_length // mask_feature_length`` feature" " bins will be masked along the time axis." ) }, ) mask_feature_length: int = field( default=10, metadata={"help": "Length of vector span to mask along the feature axis."}, ) layerdrop: float = field(default=0.0, metadata={"help": "The LayerDrop probability."}) ctc_zero_infinity: bool = field( default=False, metadata={"help": "Whether to zero infinite losses and the associated gradients of `torch.nn.CTCLoss`."}, ) ctc_loss_reduction: Optional[str] = field( default="mean", metadata={"help": "The way the ctc loss should be reduced. Should be one of 'mean' or 'sum'."} ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default="google/xtreme_s", metadata={"help": "The name of the dataset to use (via the datasets library). Defaults to 'google/xtreme_s'"}, ) task: str = field( default=None, metadata={ "help": ( "The task name of the benchmark to use (via the datasets library). Should be on of: " "'fleurs-asr', 'mls', 'voxpopuli', 'covost2', 'minds14', 'fleurs-lang_id', 'babel'." ) }, ) language: str = field( default="all", metadata={"help": "The language id as defined in the datasets config name or `all` for all languages."}, ) language_group: str = field( default=None, metadata={ "help": ( "The language group to select a subset of languages to train on. " "This option is only used the 'fleurs-asr' task. Should be one of: " "'western_european_we', 'eastern_european_ee', 'central_asia_middle_north_african_cmn', " "'sub_saharan_african_ssa', 'south_asian_sa', 'south_east_asian_sea', 'chinese_japanase_korean_cjk'." ) }, ) train_split_name: str = field( default="train", metadata={ "help": "The name of the training dataset split to use (via the datasets library). Defaults to 'train'" }, ) eval_split_name: str = field( default="validation", metadata={ "help": ( "The name of the evaluation dataset split to use (via the datasets library). Defaults to 'validation'" ) }, ) predict_split_name: str = field( default="test", metadata={ "help": "The name of the prediction dataset split to use (via the datasets library). Defaults to 'test'" }, ) audio_column_name: str = field( default="audio", metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"}, ) target_column_name: str = field( default=None, metadata={ "help": ( "The name of the dataset column containing the target data (transcription/translation/label). If None," " the name will be inferred from the task. Defaults to None." 
) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." ) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) chars_to_ignore: Optional[List[str]] = list_field( default=', ? . ! - ; : " “ % ‘ ” �'.split(" "), metadata={"help": "A list of characters to remove from the transcripts."}, ) max_duration_in_seconds: float = field( default=30.0, metadata={ "help": ( "Filter audio files that are longer than `max_duration_in_seconds` seconds to" " 'max_duration_in_seconds`" ) }, ) min_duration_in_seconds: float = field( default=0.0, metadata={"help": "Filter audio files that are shorter than `min_duration_in_seconds` seconds"} ) preprocessing_only: bool = field( default=False, metadata={ "help": ( "Whether to only do data preprocessing and skip training. This is especially useful when data" " preprocessing errors out in distributed training due to timeout. In this case, one should run the" " preprocessing in a non-distributed setup with `preprocessing_only=True` so that the cached datasets" " can consequently be loaded in distributed training" ) }, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "If :obj:`True`, will use the token generated when running" ":obj:`huggingface-cli login` as HTTP bearer authorization for remote files." ) }, ) unk_token: str = field( default="[UNK]", metadata={"help": "The unk token for the tokenizer"}, ) pad_token: str = field( default="[PAD]", metadata={"help": "The padding token for the tokenizer"}, ) word_delimiter_token: str = field( default="|", metadata={"help": "The word delimiter token for the tokenizer"}, ) phoneme_language: Optional[str] = field( default=None, metadata={ "help": ( "The target language that should be used be" " passed to the tokenizer for tokenization. Note that" " this is only relevant if the model classifies the" " input audio to a sequence of phoneme sequences." ) }, ) per_lang_metrics: bool = field( default=True, metadata={ "help": ( "If `True`, compute the test metrics separately for each language, and average the results. " "If `False` compute the average test metrics in a single pass for all languages at once." 
) }, ) @dataclass class SpeechDataCollatorWithPadding: processor: AutoProcessor decoder_start_token_id: Optional[int] = None padding: Union[bool, str] = "longest" pad_labels: Optional[int] = True pad_to_multiple_of: Optional[int] = None pad_to_multiple_of_labels: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: # split inputs and labels since they have to be of different lenghts and need # different padding methods input_features = [{"input_values": feature["input_values"]} for feature in features] batch = self.processor.pad( input_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) if self.pad_labels: label_features = [{"input_ids": feature["labels"]} for feature in features] labels_batch = self.processor.pad( labels=label_features, padding=self.padding, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", ) # replace padding with -100 to ignore loss correctly labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100) # if bos token is appended in previous tokenization step, # cut bos token here as it's append later anyways if ( self.decoder_start_token_id is not None and (labels[:, 0] == self.decoder_start_token_id).all().cpu().item() ): labels = labels[:, 1:] batch["labels"] = labels else: batch["labels"] = torch.tensor([feature["labels"] for feature in features]) return batch def create_vocabulary_from_data( datasets: DatasetDict, word_delimiter_token: Optional[str] = None, unk_token: Optional[str] = None, pad_token: Optional[str] = None, ): # Given training and test labels create vocabulary def extract_all_chars(batch): all_text = " ".join(batch["target_text"]) vocab = list(set(all_text)) return {"vocab": [vocab], "all_text": [all_text]} vocabs = datasets.map( extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=datasets["train"].column_names, ) # take union of all unique characters in each dataset vocab_set = ( (set(vocabs["train"]["vocab"][0]) if "train" in vocabs else set()) | (set(vocabs["eval"]["vocab"][0]) if "eval" in vocabs else set()) | (set(vocabs["predict"]["vocab"][0]) if "predict" in vocabs else set()) ) vocab_dict = {v: k for k, v in enumerate(sorted(vocab_set))} # replace white space with delimiter token if word_delimiter_token is not None: vocab_dict[word_delimiter_token] = vocab_dict[" "] del vocab_dict[" "] # add unk and pad token if unk_token is not None: vocab_dict[unk_token] = len(vocab_dict) if pad_token is not None: vocab_dict[pad_token] = len(vocab_dict) return vocab_dict def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. 
last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # 1. First, let's load the dataset raw_datasets = DatasetDict() task_name = data_args.task lang_id = data_args.language if task_name is None: raise ValueError( "Set --task should be set to '<xtreme_s_task>' (e.g. 'fleurs-asr', 'mls', 'covost2', 'minds14') " ) if lang_id is None: raise ValueError( "Set --language should be set to the language id of the sub dataset " "config to be used (e.g. 'pl', 'en.tr', 'fr-FR') or 'all'" " for multi-lingual fine-tuning." ) if data_args.language_group is not None: if data_args.task != "fleurs-asr": raise ValueError("--language_group should only be used with --task=fleurs-asr") if data_args.language != "all": raise ValueError("--language_group should only be used with --language=all") if data_args.target_column_name is None: target_column_name = TASK_TO_TARGET_COLUMN_NAME[task_name] else: target_column_name = data_args.target_column_name # here we differentiate between tasks with text as the target and classification tasks is_text_target = target_column_name in ("transcription", "translation") config_name = ".".join([task_name.split("-")[0], lang_id]) if training_args.do_train: raw_datasets["train"] = load_dataset( data_args.dataset_name, config_name, split=data_args.train_split_name, use_auth_token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--audio_column_name '{data_args.audio_column_name}' not found in dataset '{data_args.dataset_name}'." " Make sure to set `--audio_column_name` to the correct audio column - one of" f" {', '.join(raw_datasets['train'].column_names)}." ) if target_column_name not in raw_datasets["train"].column_names: raise ValueError( f"--target_column_name {target_column_name} not found in dataset '{data_args.dataset_name}'. " "Make sure to set `--target_column_name` to the correct text column - one of " f"{', '.join(raw_datasets['train'].column_names)}." 
) if data_args.max_train_samples is not None: raw_datasets["train"] = raw_datasets["train"].select(range(data_args.max_train_samples)) if training_args.do_eval: raw_datasets["eval"] = load_dataset( data_args.dataset_name, config_name, split=data_args.eval_split_name, use_auth_token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.max_eval_samples is not None: raw_datasets["eval"] = raw_datasets["eval"].select(range(data_args.max_eval_samples)) if training_args.do_predict: raw_datasets["predict"] = load_dataset( data_args.dataset_name, config_name, split=data_args.predict_split_name, use_auth_token=data_args.use_auth_token, cache_dir=model_args.cache_dir, ) if data_args.max_predict_samples is not None: raw_datasets["predict"] = raw_datasets["predict"].select(range(data_args.max_predict_samples)) lang_list = next(iter(raw_datasets.values())).features["lang_id"].names if not is_text_target: label_list = next(iter(raw_datasets.values())).features[target_column_name].names num_labels = len(label_list) num_workers = data_args.preprocessing_num_workers lang_group = data_args.language_group if lang_group is not None: with training_args.main_process_first(desc="language group filter"): lang_group_id = next(iter(raw_datasets.values())).features["lang_group_id"].str2int(lang_group) raw_datasets = raw_datasets.filter( lambda lang_group: lang_group == lang_group_id, num_proc=num_workers, input_columns=["lang_group_id"], ) # 2. We remove some special characters from the datasets # that make training complicated and do not help in transcribing the speech # E.g. characters, such as `,` and `.` do not really have an acoustic characteristic # that could be easily picked up by the model chars_to_ignore_regex = ( f'[{"".join(data_args.chars_to_ignore)}]' if data_args.chars_to_ignore is not None else None ) def remove_special_characters(batch): if chars_to_ignore_regex is not None: batch["target_text"] = re.sub(chars_to_ignore_regex, "", batch[target_column_name]).lower() + " " else: batch["target_text"] = batch[target_column_name].lower() + " " return batch if is_text_target: with training_args.main_process_first(desc="dataset map special characters removal"): raw_datasets = raw_datasets.map( remove_special_characters, remove_columns=[target_column_name], desc="remove special characters from datasets", ) # save special tokens for tokenizer word_delimiter_token = data_args.word_delimiter_token unk_token = data_args.unk_token pad_token = data_args.pad_token # 3. Next, let's load the config as we might need it to create # the tokenizer config = AutoConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token ) if is_text_target: # 4. 
(Optional, for ASR and translation) If no tokenizer file is defined, # we create the vocabulary of the model by extracting all unique characters from # the training and evaluation datasets # We need to make sure that only first rank saves vocabulary # make sure all processes wait until vocab is created tokenizer_name_or_path = model_args.tokenizer_name_or_path tokenizer_kwargs = {} if tokenizer_name_or_path is None: # save vocab in training output dir tokenizer_name_or_path = training_args.output_dir vocab_file = os.path.join(tokenizer_name_or_path, "vocab.json") with training_args.main_process_first(): if training_args.overwrite_output_dir and os.path.isfile(vocab_file): os.remove(vocab_file) with training_args.main_process_first(desc="dataset map vocabulary creation"): if not os.path.isfile(vocab_file): os.makedirs(tokenizer_name_or_path, exist_ok=True) vocab_dict = create_vocabulary_from_data( raw_datasets, word_delimiter_token=word_delimiter_token, unk_token=unk_token, pad_token=pad_token, ) # save vocab dict to be loaded into tokenizer with open(vocab_file, "w") as file: json.dump(vocab_dict, file) # if tokenizer has just been created # it is defined by `tokenizer_class` if present in config else by `model_type` if not config.is_encoder_decoder: tokenizer_kwargs = { "config": config if config.tokenizer_class is not None else None, "tokenizer_type": config.model_type if config.tokenizer_class is None else None, "unk_token": unk_token, "pad_token": pad_token, "word_delimiter_token": word_delimiter_token, } else: tokenizer_kwargs = {} # 5. Now we can instantiate the feature extractor, tokenizer and model # Note for distributed training, the .from_pretrained methods guarantee that only # one local process can concurrently download model & vocab. # load feature_extractor and tokenizer if is_text_target: tokenizer = AutoTokenizer.from_pretrained( tokenizer_name_or_path, use_auth_token=data_args.use_auth_token, **tokenizer_kwargs, ) feature_extractor = AutoFeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_auth_token=data_args.use_auth_token ) # adapt config # (speech translation requires pre-configured seq2seq models) if task_name != "covost2": config.update( { "feat_proj_dropout": model_args.feat_proj_dropout, "attention_dropout": model_args.attention_dropout, "hidden_dropout": model_args.hidden_dropout, "final_dropout": model_args.final_dropout, "mask_time_prob": model_args.mask_time_prob, "mask_time_length": model_args.mask_time_length, "mask_feature_prob": model_args.mask_feature_prob, "mask_feature_length": model_args.mask_feature_length, "gradient_checkpointing": training_args.gradient_checkpointing, "layerdrop": model_args.layerdrop, "ctc_zero_infinity": model_args.ctc_zero_infinity, "ctc_loss_reduction": model_args.ctc_loss_reduction, "activation_dropout": model_args.activation_dropout, } ) if training_args.do_train: if is_text_target: config.pad_token_id = tokenizer.pad_token_id config.vocab_size = len(tokenizer) else: label_to_id = {v: i for i, v in enumerate(label_list)} config.label2id = label_to_id config.id2label = {id: label for label, id in label_to_id.items()} config.num_labels = num_labels # create model if target_column_name == "transcription": model = AutoModelForCTC.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, use_auth_token=data_args.use_auth_token, ) elif config.is_encoder_decoder: model = AutoModelForSpeechSeq2Seq.from_pretrained( model_args.model_name_or_path, 
cache_dir=model_args.cache_dir, config=config, use_auth_token=data_args.use_auth_token, ) if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") else: model = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, config=config, use_auth_token=data_args.use_auth_token, ) # freeze encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() # 6. Now we preprocess the datasets including loading the audio, resampling and normalization # Thankfully, `datasets` takes care of automatically loading and resampling the audio, # so that we just need to set the correct target sampling rate and normalize the input # via the `feature_extractor` # make sure that dataset decodes audio with correct sampling rate dataset_sampling_rate = next(iter(raw_datasets.values())).features[data_args.audio_column_name].sampling_rate if dataset_sampling_rate != feature_extractor.sampling_rate: raw_datasets = raw_datasets.cast_column( data_args.audio_column_name, datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate) ) # derive max & min input length for sample rate & max duration max_input_length = data_args.max_duration_in_seconds * feature_extractor.sampling_rate min_input_length = data_args.min_duration_in_seconds * feature_extractor.sampling_rate audio_column_name = data_args.audio_column_name # `phoneme_language` is only relevant if the model is fine-tuned on phoneme classification phoneme_language = data_args.phoneme_language # Preprocessing the datasets. # We need to read the audio files as arrays and tokenize the targets. def prepare_dataset(batch): # load audio sample = batch[audio_column_name] inputs = feature_extractor(sample["array"], sampling_rate=sample["sampling_rate"]) batch["input_values"] = inputs.input_values[0] batch["length"] = len(batch["input_values"]) # encode targets additional_kwargs = {} if phoneme_language is not None: additional_kwargs["phonemizer_lang"] = phoneme_language if is_text_target: batch["labels"] = tokenizer(batch["target_text"], **additional_kwargs).input_ids else: batch["labels"] = batch[target_column_name] batch["lang"] = batch["lang_id"] return batch with training_args.main_process_first(desc="dataset map preprocessing"): vectorized_datasets = raw_datasets.map( prepare_dataset, remove_columns=next(iter(raw_datasets.values())).column_names, num_proc=num_workers, desc="preprocess datasets", ) if training_args.do_train: def is_audio_in_length_range(length): return length > min_input_length and length < max_input_length # filter data that is shorter than min_input_length vectorized_datasets["train"] = vectorized_datasets["train"].filter( is_audio_in_length_range, num_proc=num_workers, input_columns=["length"], ) # 7. Next, we can prepare for the training step. # Let's use the appropriate XTREME-S evaluation metric, # instantiate a data collator and the trainer # Define evaluation metrics during training, *i.e.* word error rate, character error rate eval_metric = load_metric("xtreme_s", task_name) # for large datasets it is advised to run the preprocessing on a # single machine first with ``args.preprocessing_only`` since there will mostly likely # be a timeout when running the script in distributed mode. # In a second step ``args.preprocessing_only`` can then be set to `False` to load the # cached dataset if data_args.preprocessing_only: logger.info(f"Data preprocessing finished. 
Files cached at {vectorized_datasets.cache_files}") return def asr_logits_argmax(logits, labels): return logits.argmax(dim=-1) def compute_asr_metric(pred): pred.label_ids[pred.label_ids == -100] = tokenizer.pad_token_id pred_str = tokenizer.batch_decode(pred.predictions) # we do not want to group tokens when computing the metrics label_str = tokenizer.batch_decode(pred.label_ids, group_tokens=False) metric = eval_metric.compute(predictions=pred_str, references=label_str) return metric def compute_classification_metric(pred): pred_ids = np.argmax(pred.predictions, axis=1) metric = eval_metric.compute(predictions=pred_ids, references=pred.label_ids) return metric # Now save everything to be able to create a single processor later if is_main_process(training_args.local_rank): # save feature extractor, tokenizer and config feature_extractor.save_pretrained(training_args.output_dir) if is_text_target: tokenizer.save_pretrained(training_args.output_dir) config.save_pretrained(training_args.output_dir) # wait until configs are saved in the main process before loading the processor if training_args.local_rank != -1: torch.distributed.barrier() if is_text_target: processor = AutoProcessor.from_pretrained(training_args.output_dir) else: processor = AutoFeatureExtractor.from_pretrained(training_args.output_dir) # Instantiate custom data collator data_collator = SpeechDataCollatorWithPadding(processor=processor, pad_labels=is_text_target) # Initialize Trainer if target_column_name == "translation": trainer = Seq2SeqTrainer( model=model, data_collator=data_collator, args=training_args, preprocess_logits_for_metrics=asr_logits_argmax if training_args.predict_with_generate else None, compute_metrics=compute_asr_metric if training_args.predict_with_generate else None, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, ) else: trainer = Trainer( model=model, data_collator=data_collator, args=training_args, preprocess_logits_for_metrics=asr_logits_argmax if is_text_target else None, compute_metrics=compute_asr_metric if is_text_target else compute_classification_metric, train_dataset=vectorized_datasets["train"] if training_args.do_train else None, eval_dataset=vectorized_datasets["eval"] if training_args.do_eval else None, tokenizer=feature_extractor, ) # 8. 
Finally, we can start training # Training if training_args.do_train: # use last checkpoint if exist if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() metrics = train_result.metrics max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(vectorized_datasets["train"]) ) metrics["train_samples"] = min(max_train_samples, len(vectorized_datasets["train"])) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation on the test set results = {} if training_args.do_predict: logger.info(f"*** Evaluating on the `{data_args.predict_split_name}` set ***") if data_args.per_lang_metrics: # separate the `test` dataset into language-specific subsets and compute metrics for each of them metrics = {} average_metrics = defaultdict(list) for lang_id in range(len(lang_list)): lang_name = lang_list[lang_id] with training_args.main_process_first(desc="per-language dataset filter"): lang_dataset = vectorized_datasets["predict"].filter( lambda lang: lang == lang_id, num_proc=num_workers, input_columns=["lang"], ) lang_metrics = trainer.evaluate(lang_dataset) redundant_metrics = ["eval_runtime", "eval_samples_per_second", "eval_steps_per_second", "eval_epoch"] for metric_name, value in lang_metrics.items(): average_metrics[metric_name].append(value) if metric_name not in redundant_metrics: metrics[f"{metric_name}_{lang_name}"] = value for metric_name, value in average_metrics.items(): metrics[metric_name] = np.mean(value) else: metrics = trainer.evaluate(vectorized_datasets["predict"]) max_predict_samples = ( data_args.max_predict_samples if data_args.max_predict_samples is not None else len(vectorized_datasets["predict"]) ) metrics["predict_samples"] = min(max_predict_samples, len(vectorized_datasets["predict"])) # make sure that the `predict` metrics end up in the log history for the model card trainer.log(OrderedDict(sorted(metrics.items()))) trainer.log_metrics("predict", metrics) trainer.save_metrics("predict", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "tasks": task_name, "tags": [task_name, data_args.dataset_name], "dataset_args": ( f"Config: {config_name}, Training split: {data_args.train_split_name}, Eval split:" f" {data_args.eval_split_name}, Predict split: {data_args.predict_split_name}" ), "dataset": f"{data_args.dataset_name.upper()} - {config_name.upper()}", "language": data_args.language, } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) return results if __name__ == "__main__": main()
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mm-imdb/README.md
## MM-IMDb

Based on the script [`run_mmimdb.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/mm-imdb/run_mmimdb.py).

[MM-IMDb](http://lisi1.unal.edu.co/mmimdb/) is a multimodal dataset with around 26,000 movies including images, plots and other metadata.

### Training on MM-IMDb

```
python run_mmimdb.py \
    --data_dir /path/to/mmimdb/dataset/ \
    --model_type bert \
    --model_name_or_path bert-base-uncased \
    --output_dir /path/to/save/dir/ \
    --do_train \
    --do_eval \
    --max_seq_len 512 \
    --gradient_accumulation_steps 20 \
    --num_image_embeds 3 \
    --num_train_epochs 100 \
    --patience 5
```
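The training command above expects a `train.jsonl` and a `dev.jsonl` under `--data_dir`, one line per movie. The sketch below shows the record layout that `JsonlDataset` (in the accompanying `utils_mmimdb.py`) reads: a `text` field, a list of genre `label`s and an image path resolved relative to the JSONL file; the concrete values are made-up placeholders.

```python
# Sketch of one line of train.jsonl / dev.jsonl as read by JsonlDataset.
# The plot text, genres and image path below are made-up placeholders.
import json

record = {
    "text": "A retired detective is pulled back in for one last case.",  # plot/text field
    "label": ["Crime", "Drama", "Thriller"],                             # any subset of get_mmimdb_labels()
    "img": "images/0012345.jpeg",                                        # resolved relative to the JSONL's directory
}
with open("train.jsonl", "a", encoding="utf-8") as f:
    f.write(json.dumps(record) + "\n")
```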
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mm-imdb/run_mmimdb.py
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for multimodal multiclass prediction on MM-IMDB dataset.""" import argparse import glob import json import logging import os import random import numpy as np import torch from sklearn.metrics import f1_score from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_transforms, get_mmimdb_labels import transformers from transformers import ( WEIGHTS_NAME, AdamW, AutoConfig, AutoModel, AutoTokenizer, MMBTConfig, MMBTForClassification, get_linear_schedule_with_warmup, ) from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def train(args, train_dataset, model, tokenizer, criterion): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader( train_dataset, sampler=train_sampler, batch_size=args.train_batch_size, collate_fn=collate_fn, num_workers=args.num_workers, ) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, 
device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 best_f1, n_no_improve = 0, 0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) loss = criterion(logits, labels) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: logs = {} if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer, criterion) for key, value in results.items(): eval_key = "eval_{}".format(key) logs[eval_key] = value loss_scalar = (tr_loss - logging_loss) / args.logging_steps learning_rate_scalar = scheduler.get_lr()[0] logs["learning_rate"] = learning_rate_scalar logs["loss"] = loss_scalar logging_loss = tr_loss for key, value in logs.items(): tb_writer.add_scalar(key, value, global_step) print(json.dumps({**logs, **{"step": global_step}})) if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(output_dir, WEIGHTS_NAME)) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: 
train_iterator.close() break if args.local_rank == -1: results = evaluate(args, model, tokenizer, criterion) if results["micro_f1"] > best_f1: best_f1 = results["micro_f1"] n_no_improve = 0 else: n_no_improve += 1 if n_no_improve > args.patience: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, criterion, prefix=""): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_output_dir = args.output_dir eval_dataset = load_examples(args, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) eval_dataloader = DataLoader( eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size, collate_fn=collate_fn ) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): model = nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): batch = tuple(t.to(args.device) for t in batch) labels = batch[5] inputs = { "input_ids": batch[0], "input_modal": batch[2], "attention_mask": batch[1], "modal_start_tokens": batch[3], "modal_end_tokens": batch[4], } outputs = model(**inputs) logits = outputs[0] # model outputs are always tuple in transformers (see doc) tmp_eval_loss = criterion(logits, labels) eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = torch.sigmoid(logits).detach().cpu().numpy() > 0.5 out_label_ids = labels.detach().cpu().numpy() else: preds = np.append(preds, torch.sigmoid(logits).detach().cpu().numpy() > 0.5, axis=0) out_label_ids = np.append(out_label_ids, labels.detach().cpu().numpy(), axis=0) eval_loss = eval_loss / nb_eval_steps result = { "loss": eval_loss, "macro_f1": f1_score(out_label_ids, preds, average="macro"), "micro_f1": f1_score(out_label_ids, preds, average="micro"), } output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return result def load_examples(args, tokenizer, evaluate=False): path = os.path.join(args.data_dir, "dev.jsonl" if evaluate else "train.jsonl") transforms = get_image_transforms() labels = get_mmimdb_labels() dataset = JsonlDataset(path, tokenizer, transforms, labels, args.max_seq_length - args.num_image_embeds - 2) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .jsonl files for MMIMDB.", ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--max_seq_length", default=128, type=int, help=( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--num_image_embeds", default=1, type=int, help="Number of Image Embeddings from the Image Encoder" ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument("--patience", default=5, type=int, help="Patience for Early Stopping.") parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument("--num_workers", type=int, default=8, help="number of worker threads for dataloading") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab # Setup model labels = get_mmimdb_labels() num_labels = len(labels) transformer_config = AutoConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir, ) transformer = AutoModel.from_pretrained( args.model_name_or_path, config=transformer_config, cache_dir=args.cache_dir ) img_encoder = ImageEncoder(args) config = MMBTConfig(transformer_config, num_labels=num_labels) model = MMBTForClassification(config, transformer, img_encoder) if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_examples(args, tokenizer, evaluate=False) label_frequences = train_dataset.get_label_frequencies() label_frequences = [label_frequences[l] for l in labels] label_weights = ( torch.tensor(label_frequences, device=args.device, dtype=torch.float) / len(train_dataset) ) ** -1 criterion = nn.BCEWithLogitsLoss(pos_weight=label_weights) global_step, tr_loss = train(args, train_dataset, model, tokenizer, criterion) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training torch.save(model_to_save.state_dict(), os.path.join(args.output_dir, WEIGHTS_NAME)) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(os.path.join(args.output_dir, WEIGHTS_NAME))) tokenizer = AutoTokenizer.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = MMBTForClassification(config, transformer, img_encoder) model.load_state_dict(torch.load(checkpoint)) model.to(args.device) result = evaluate(args, model, tokenizer, criterion, prefix=prefix) result = {k + "_{}".format(global_step): v for k, v in result.items()} results.update(result) return results if __name__ == "__main__": main()
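Since MM-IMDb is a multi-label task (a movie can belong to several genres), the script pairs `BCEWithLogitsLoss` with a `pos_weight` equal to the inverse label frequency computed on the training set, and evaluation thresholds `sigmoid(logits)` at 0.5 before computing micro/macro F1. The sketch below reproduces both steps on made-up counts and random tensors, just to make the shapes and formulas concrete.

```python
# Sketch of the inverse-frequency pos_weight and thresholded multi-label evaluation
# used by run_mmimdb.py. The genre counts and tensors are made-up placeholders.
import torch
from torch import nn
from sklearn.metrics import f1_score

labels = ["Drama", "Comedy", "Horror"]
label_frequencies = {"Drama": 120, "Comedy": 60, "Horror": 20}  # hypothetical counts
num_examples = 200

# Rare genres receive a larger positive weight: (count / num_examples) ** -1
pos_weight = (torch.tensor([label_frequencies[name] for name in labels], dtype=torch.float) / num_examples) ** -1
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)

logits = torch.randn(4, len(labels))              # model outputs for a batch of 4
targets = torch.randint(0, 2, (4, len(labels)))   # multi-hot genre targets
loss = criterion(logits, targets.float())

# Evaluation: independent sigmoid per genre, thresholded at 0.5
preds = (torch.sigmoid(logits) > 0.5).numpy()
micro_f1 = f1_score(targets.numpy(), preds, average="micro")
macro_f1 = f1_score(targets.numpy(), preds, average="macro")
print(loss.item(), micro_f1, macro_f1)
```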
0
hf_public_repos/transformers/examples/research_projects
hf_public_repos/transformers/examples/research_projects/mm-imdb/utils_mmimdb.py
# coding=utf-8 # Copyright (c) Facebook, Inc. and its affiliates. # Copyright (c) HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class ImageEncoder(nn.Module): def __init__(self, args): super().__init__() model = torchvision.models.resnet152(pretrained=True) modules = list(model.children())[:-2] self.model = nn.Sequential(*modules) self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds]) def forward(self, x): # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048 out = self.pool(self.model(x)) out = torch.flatten(out, start_dim=2) out = out.transpose(1, 2).contiguous() return out # BxNx2048 class JsonlDataset(Dataset): def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length): self.data = [json.loads(l) for l in open(data_path)] self.data_dir = os.path.dirname(data_path) self.tokenizer = tokenizer self.labels = labels self.n_classes = len(labels) self.max_seq_length = max_seq_length self.transforms = transforms def __len__(self): return len(self.data) def __getitem__(self, index): sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)) start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1] sentence = sentence[: self.max_seq_length] label = torch.zeros(self.n_classes) label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1 image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB") image = self.transforms(image) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def get_label_frequencies(self): label_freqs = Counter() for row in self.data: label_freqs.update(row["label"]) return label_freqs def collate_fn(batch): lens = [len(row["sentence"]) for row in batch] bsz, max_seq_len = len(batch), max(lens) mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long) for i_batch, (input_row, length) in enumerate(zip(batch, lens)): text_tensor[i_batch, :length] = input_row["sentence"] mask_tensor[i_batch, :length] = 1 img_tensor = torch.stack([row["image"] for row in batch]) tgt_tensor = torch.stack([row["label"] for row in batch]) img_start_token = torch.stack([row["image_start_token"] for row in batch]) img_end_token = torch.stack([row["image_end_token"] for row in batch]) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def get_mmimdb_labels(): return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", 
"Music", "Musical", "Animation", "Biography", "Film-Noir", ] def get_image_transforms(): return transforms.Compose( [ transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017], std=[0.12221994, 0.12145835, 0.14380469], ), ] )
0
hf_public_repos/transformers/examples
hf_public_repos/transformers/examples/legacy/README.md
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# Legacy examples

This folder contains examples which are not actively maintained (mostly contributed by the community).
Using these examples together with a recent version of the library usually requires making small (and sometimes big) adaptations to get the scripts working.
0
hf_public_repos/transformers/examples
hf_public_repos/transformers/examples/legacy/run_chinese_ref.py
#!/usr/bin/env python import argparse import json from typing import List from ltp import LTP from transformers import BertTokenizer def _is_chinese_char(cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ( (cp >= 0x4E00 and cp <= 0x9FFF) or (cp >= 0x3400 and cp <= 0x4DBF) # or (cp >= 0x20000 and cp <= 0x2A6DF) # or (cp >= 0x2A700 and cp <= 0x2B73F) # or (cp >= 0x2B740 and cp <= 0x2B81F) # or (cp >= 0x2B820 and cp <= 0x2CEAF) # or (cp >= 0xF900 and cp <= 0xFAFF) or (cp >= 0x2F800 and cp <= 0x2FA1F) # ): # return True return False def is_chinese(word: str): # word like '180' or '身高' or '神' for char in word: char = ord(char) if not _is_chinese_char(char): return 0 return 1 def get_chinese_word(tokens: List[str]): word_set = set() for token in tokens: chinese_word = len(token) > 1 and is_chinese(token) if chinese_word: word_set.add(token) word_list = list(word_set) return word_list def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()): if not chinese_word_set: return bert_tokens max_word_len = max([len(w) for w in chinese_word_set]) bert_word = bert_tokens start, end = 0, len(bert_word) while start < end: single_word = True if is_chinese(bert_word[start]): l = min(end - start, max_word_len) for i in range(l, 1, -1): whole_word = "".join(bert_word[start : start + i]) if whole_word in chinese_word_set: for j in range(start + 1, start + i): bert_word[j] = "##" + bert_word[j] start = start + i single_word = False break if single_word: start += 1 return bert_word def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer): ltp_res = [] for i in range(0, len(lines), 100): res = ltp_tokenizer.seg(lines[i : i + 100])[0] res = [get_chinese_word(r) for r in res] ltp_res.extend(res) assert len(ltp_res) == len(lines) bert_res = [] for i in range(0, len(lines), 100): res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512) bert_res.extend(res["input_ids"]) assert len(bert_res) == len(lines) ref_ids = [] for input_ids, chinese_word in zip(bert_res, ltp_res): input_tokens = [] for id in input_ids: token = bert_tokenizer._convert_id_to_token(id) input_tokens.append(token) input_tokens = add_sub_symbol(input_tokens, chinese_word) ref_id = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(input_tokens): if token[:2] == "##": clean_token = token[2:] # save chinese tokens' pos if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)): ref_id.append(i) ref_ids.append(ref_id) assert len(ref_ids) == len(bert_res) return ref_ids def main(args): # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm) # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp) with open(args.file_name, "r", encoding="utf-8") as f: data = f.readlines() data = [line.strip() for line in data if len(line) > 0 and not line.isspace()] # avoid delimiter like '\u2029' ltp_tokenizer = LTP(args.ltp) # faster in GPU device bert_tokenizer = BertTokenizer.from_pretrained(args.bert) ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer) with open(args.save_path, "w", encoding="utf-8") as f: data = [json.dumps(ref) + "\n" for ref in ref_ids] f.writelines(data) if __name__ == "__main__": parser = argparse.ArgumentParser(description="prepare_chinese_ref") parser.add_argument( "--file_name", type=str, default="./resources/chinese-demo.txt", help="file need process, same as training data in lm", ) parser.add_argument( "--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path" ) parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer") parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res") args = parser.parse_args() main(args)
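Two details carry this reference-building script: `_is_chinese_char` only accepts codepoints from the CJK Unified Ideographs blocks (so Hiragana, Katakana and Hangul are deliberately not counted), and `add_sub_symbol` marks the non-initial characters of each LTP word with a `##` prefix so their token positions can be written out as reference ids for whole-word masking. The snippet below is a small, hand-checked sketch of the codepoint test with example characters; it needs neither LTP nor a BERT tokenizer.

```python
# Minimal sketch of the CJK-codepoint test behind _is_chinese_char.
# The example characters are hand-picked for illustration.
def is_cjk_codepoint(cp: int) -> bool:
    ranges = [
        (0x4E00, 0x9FFF), (0x3400, 0x4DBF), (0x20000, 0x2A6DF), (0x2A700, 0x2B73F),
        (0x2B740, 0x2B81F), (0x2B820, 0x2CEAF), (0xF900, 0xFAFF), (0x2F800, 0x2FA1F),
    ]
    return any(lo <= cp <= hi for lo, hi in ranges)

assert is_cjk_codepoint(ord("中"))       # CJK Unified Ideograph -> counted as Chinese
assert not is_cjk_codepoint(ord("の"))   # Hiragana -> excluded, as the docstring notes
assert not is_cjk_codepoint(ord("A"))    # Latin -> excluded
```

The saved `ref.txt` then contains, for every input line, the positions of those `##`-prefixed single Chinese characters, which downstream whole-word-masking training can use to mask an entire LTP-segmented word at once.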
0