text | id | metadata | __index_level_0__ |
---|---|---|---|
from _pytest.fixtures import fixture
from nist_mt import Nist_mt
nist = Nist_mt()
@fixture
def hypothesis_sent():
return "It is a guide to action which ensures that the military always obeys the commands of the party"
@fixture
def reference_sent1():
return "It is a guide to action that ensures that the military will forever heed Party commands"
@fixture
def reference_sent2():
return (
"It is the guiding principle which guarantees the military forces always being under the command of the Party"
)
@fixture
def reference_sent3():
return "It is the practical guide for the army always to heed the directions of the party"
def test_nist_sentence(hypothesis_sent, reference_sent1, reference_sent2, reference_sent3):
nist_score = nist.compute(
predictions=[hypothesis_sent], references=[[reference_sent1, reference_sent2, reference_sent3]]
)
assert abs(nist_score["nist_mt"] - 3.3709935957649324) < 1e-6
| evaluate/metrics/nist_mt/tests.py/0 | {
"file_path": "evaluate/metrics/nist_mt/tests.py",
"repo_id": "evaluate",
"token_count": 308
} | 290 |
# Copyright 2020 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" seqeval metric. """
import importlib
from typing import List, Optional, Union
import datasets
from seqeval.metrics import accuracy_score, classification_report
import evaluate
_CITATION = """\
@inproceedings{ramshaw-marcus-1995-text,
title = "Text Chunking using Transformation-Based Learning",
author = "Ramshaw, Lance and
Marcus, Mitch",
booktitle = "Third Workshop on Very Large Corpora",
year = "1995",
url = "https://www.aclweb.org/anthology/W95-0107",
}
@misc{seqeval,
title={{seqeval}: A Python framework for sequence labeling evaluation},
url={https://github.com/chakki-works/seqeval},
note={Software available from https://github.com/chakki-works/seqeval},
author={Hiroki Nakayama},
year={2018},
}
"""
_DESCRIPTION = """\
seqeval is a Python framework for sequence labeling evaluation.
seqeval can evaluate the performance of chunking tasks such as named-entity recognition, part-of-speech tagging, semantic role labeling and so on.
seqeval is well tested against the Perl script conlleval, which can be used for
measuring the performance of a system that has processed the CoNLL-2000 shared task data.
seqeval supports the following formats:
IOB1
IOB2
IOE1
IOE2
IOBES
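For example, in the IOB2 format the sentence "Alex lives in New York" would be tagged as ["B-PER", "O", "O", "B-LOC", "I-LOC"].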
See the [README.md] file at https://github.com/chakki-works/seqeval for more information.
"""
_KWARGS_DESCRIPTION = """
Produces labelling scores along with their sufficient statistics
from a source against one or more references.
Args:
predictions: List of List of predicted labels (Estimated targets as returned by a tagger)
references: List of List of reference labels (Ground truth (correct) target values)
suffix: True if the IOB prefix is after type, False otherwise. default: False
scheme: Specify target tagging scheme. Should be one of ["IOB1", "IOB2", "IOE1", "IOE2", "IOBES", "BILOU"].
default: None
mode: Whether to count correct entity labels with incorrect I/B tags as true positives or not.
If you want to only count exact matches, pass mode="strict". default: None.
sample_weight: Array-like of shape (n_samples,), weights for individual samples. default: None
    zero_division: Which value to substitute as a metric value when encountering zero division. Should be one of 0, 1,
        "warn". "warn" acts like 0, but a warning is also raised.
Returns:
'scores': dict. Summary of the scores for overall and per type
Overall:
'accuracy': accuracy,
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure,
Per type:
'precision': precision,
'recall': recall,
'f1': F1 score, also known as balanced F-score or F-measure
Examples:
>>> predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> seqeval = evaluate.load("seqeval")
>>> results = seqeval.compute(predictions=predictions, references=references)
>>> print(list(results.keys()))
['MISC', 'PER', 'overall_precision', 'overall_recall', 'overall_f1', 'overall_accuracy']
>>> print(results["overall_f1"])
0.5
>>> print(results["PER"]["f1"])
1.0
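    >>> # The tagging scheme and matching mode can also be passed explicitly (illustrative call;
    >>> # the exact scores depend on the inputs above):
    >>> strict_results = seqeval.compute(
    ...     predictions=predictions, references=references, scheme="IOB2", mode="strict"
    ... )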
"""
@evaluate.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Seqeval(evaluate.Metric):
def _info(self):
return evaluate.MetricInfo(
description=_DESCRIPTION,
citation=_CITATION,
homepage="https://github.com/chakki-works/seqeval",
inputs_description=_KWARGS_DESCRIPTION,
features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
"references": datasets.Sequence(datasets.Value("string", id="label"), id="sequence"),
}
),
codebase_urls=["https://github.com/chakki-works/seqeval"],
reference_urls=["https://github.com/chakki-works/seqeval"],
)
def _compute(
self,
predictions,
references,
suffix: bool = False,
scheme: Optional[str] = None,
mode: Optional[str] = None,
sample_weight: Optional[List[int]] = None,
zero_division: Union[str, int] = "warn",
):
if scheme is not None:
try:
scheme_module = importlib.import_module("seqeval.scheme")
scheme = getattr(scheme_module, scheme)
except AttributeError:
raise ValueError(f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {scheme}")
report = classification_report(
y_true=references,
y_pred=predictions,
suffix=suffix,
output_dict=True,
scheme=scheme,
mode=mode,
sample_weight=sample_weight,
zero_division=zero_division,
)
report.pop("macro avg")
report.pop("weighted avg")
overall_score = report.pop("micro avg")
scores = {
type_name: {
"precision": score["precision"],
"recall": score["recall"],
"f1": score["f1-score"],
"number": score["support"],
}
for type_name, score in report.items()
}
scores["overall_precision"] = overall_score["precision"]
scores["overall_recall"] = overall_score["recall"]
scores["overall_f1"] = overall_score["f1-score"]
scores["overall_accuracy"] = accuracy_score(y_true=references, y_pred=predictions)
return scores
| evaluate/metrics/seqeval/seqeval.py/0 | {
"file_path": "evaluate/metrics/seqeval/seqeval.py",
"repo_id": "evaluate",
"token_count": 2499
} | 291 |
---
title: WER
emoji: 🤗
colorFrom: blue
colorTo: red
sdk: gradio
sdk_version: 3.19.1
app_file: app.py
pinned: false
tags:
- evaluate
- metric
description: >-
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
---
# Metric Card for WER
## Metric description
Word error rate (WER) is a common metric of the performance of an automatic speech recognition (ASR) system.
The general difficulty of measuring the performance of ASR systems lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the [Levenshtein distance](https://en.wikipedia.org/wiki/Levenshtein_distance), working at the word level.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. The relationship between [perplexity](https://huggingface.co/metrics/perplexity) and word error rate has also been studied and is often described as a power law (see [this article](https://www.cs.cmu.edu/~roni/papers/eval-metrics-bntuw-9802.pdf) for further information).
Word error rate can then be computed as:
`WER = (S + D + I) / N = (S + D + I) / (S + D + C)`
where
`S` is the number of substitutions,
`D` is the number of deletions,
`I` is the number of insertions,
`C` is the number of correct words,
`N` is the number of words in the reference (`N=S+D+C`).
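As a small worked example (not from the original metric card): if the reference is `"the cat sat on the mat"` (6 words) and the prediction is `"the cat sit on mat"`, there is 1 substitution (`sat` → `sit`), 1 deletion (the second `the`), and 0 insertions, so `WER = (1 + 1 + 0) / 6 ≈ 0.33`. The sketch below computes WER from a word-level Levenshtein distance; it is purely illustrative and not the implementation used by the `wer` metric.
```python
# Illustrative word-level edit distance; the sentences are made-up examples.
def word_error_rate(reference: str, prediction: str) -> float:
    ref, hyp = reference.split(), prediction.split()
    # dp[i][j] = edit distance between the first i reference words and the first j predicted words
    dp = [[0] * (len(hyp) + 1) for _ in range(len(ref) + 1)]
    for i in range(len(ref) + 1):
        dp[i][0] = i
    for j in range(len(hyp) + 1):
        dp[0][j] = j
    for i in range(1, len(ref) + 1):
        for j in range(1, len(hyp) + 1):
            cost = 0 if ref[i - 1] == hyp[j - 1] else 1
            dp[i][j] = min(
                dp[i - 1][j] + 1,         # deletion
                dp[i][j - 1] + 1,         # insertion
                dp[i - 1][j - 1] + cost,  # substitution or match
            )
    return dp[-1][-1] / len(ref)

print(word_error_rate("the cat sat on the mat", "the cat sit on mat"))  # ≈ 0.333
```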
## How to use
The metric takes two inputs: references (a list of references for each speech input) and predictions (a list of transcriptions to score).
```python
from evaluate import load
wer = load("wer")
wer_score = wer.compute(predictions=predictions, references=references)
```
## Output values
This metric outputs a float representing the word error rate.
```
print(wer_score)
0.5
```
This value indicates the average number of errors per reference word.
The **lower** the value, the **better** the performance of the ASR system, with a WER of 0 being a perfect score.
### Values from popular papers
This metric is highly dependent on the content and quality of the dataset, and therefore users can expect very different values for the same model but on different datasets.
For example, models evaluated on [LibriSpeech](https://huggingface.co/datasets/librispeech_asr) report WER values in the 1.8-3.3 range, whereas models evaluated on [Timit](https://huggingface.co/datasets/timit_asr) report WER values in the 8.3-20.4 range.
See the leaderboards for [LibriSpeech](https://paperswithcode.com/sota/speech-recognition-on-librispeech-test-clean) and [Timit](https://paperswithcode.com/sota/speech-recognition-on-timit) for the most recent values.
## Examples
Perfect match between prediction and reference:
```python
from evaluate import load
wer = load("wer")
predictions = ["hello world", "good night moon"]
references = ["hello world", "good night moon"]
wer_score = wer.compute(predictions=predictions, references=references)
print(wer_score)
0.0
```
Partial match between prediction and reference:
```python
from evaluate import load
wer = load("wer")
predictions = ["this is the prediction", "there is an other sample"]
references = ["this is the reference", "there is another one"]
wer_score = wer.compute(predictions=predictions, references=references)
print(wer_score)
0.5
```
No match between prediction and reference:
```python
from evaluate import load
wer = load("wer")
predictions = ["hello world", "good night moon"]
references = ["hi everyone", "have a great day"]
wer_score = wer.compute(predictions=predictions, references=references)
print(wer_score)
1.0
```
## Limitations and bias
WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of recognition errors, and further work is therefore required to identify the main source(s) of error and to focus any research effort.
## Citation
```bibtex
@inproceedings{woodard1982,
author = {Woodard, J.P. and Nelson, J.T.},
year = {1982},
journal = {Workshop on standardisation for speech I/O technology, Naval Air Development Center, Warminster, PA},
title = {An information theoretic measure of speech recognition performance}
}
```
```bibtex
@inproceedings{morris2004,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
```
## Further References
- [Word Error Rate -- Wikipedia](https://en.wikipedia.org/wiki/Word_error_rate)
- [Hugging Face Tasks -- Automatic Speech Recognition](https://huggingface.co/tasks/automatic-speech-recognition)
| evaluate/metrics/wer/README.md/0 | {
"file_path": "evaluate/metrics/wer/README.md",
"repo_id": "evaluate",
"token_count": 1731
} | 292 |
[metadata]
license_file = LICENSE
[isort]
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, W503
max-line-length = 119
exclude =
src/datasets/datasets
src/datasets/metrics
per-file-ignores =
metrics/*:F401
| evaluate/setup.cfg/0 | {
"file_path": "evaluate/setup.cfg",
"repo_id": "evaluate",
"token_count": 148
} | 293 |
# Copyright 2022 The HuggingFace Evaluate Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from datasets import ClassLabel, Dataset, Sequence
from typing_extensions import Literal
from ..module import EvaluationModule
from ..utils.file_utils import add_end_docstrings, add_start_docstrings
from .base import EVALUATOR_COMPUTE_RETURN_DOCSTRING, EVALUTOR_COMPUTE_START_DOCSTRING, Evaluator
from .utils import DatasetColumn
if TYPE_CHECKING:
from transformers import Pipeline, PreTrainedModel, PreTrainedTokenizer, TFPreTrainedModel
TASK_DOCUMENTATION = r"""
    The dataset input and label columns are expected to be formatted as a list of words and a list of labels respectively, following the [conll2003 dataset](https://huggingface.co/datasets/conll2003). Datasets whose inputs are single strings and whose labels are lists of offsets are not supported.
Examples:
```python
>>> from evaluate import evaluator
>>> from datasets import load_dataset
>>> task_evaluator = evaluator("token-classification")
>>> data = load_dataset("conll2003", split="validation[:2]")
>>> results = task_evaluator.compute(
>>> model_or_pipeline="elastic/distilbert-base-uncased-finetuned-conll03-english",
>>> data=data,
>>> metric="seqeval",
>>> )
```
<Tip>
For example, the following dataset format is accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New", "York", "is", "a", "city", "and", "Felix", "a", "person", "."]],
"ner_tags": [[1, 2, 0, 0, 0, 0, 3, 0, 0, 0]],
},
features=Features({
"tokens": Sequence(feature=Value(dtype="string")),
"ner_tags": Sequence(feature=ClassLabel(names=["O", "B-LOC", "I-LOC", "B-PER", "I-PER"])),
}),
)
```
</Tip>
<Tip warning={true}>
For example, the following dataset format is **not** accepted by the evaluator:
```python
dataset = Dataset.from_dict(
mapping={
"tokens": [["New York is a city and Felix a person."]],
"starts": [[0, 23]],
"ends": [[7, 27]],
"ner_tags": [["LOC", "PER"]],
},
features=Features({
"tokens": Value(dtype="string"),
"starts": Sequence(feature=Value(dtype="int32")),
"ends": Sequence(feature=Value(dtype="int32")),
"ner_tags": Sequence(feature=Value(dtype="string")),
}),
)
```
</Tip>
"""
class TokenClassificationEvaluator(Evaluator):
"""
Token classification evaluator.
This token classification evaluator can currently be loaded from [`evaluator`] using the default task name
`token-classification`.
Methods in this class assume a data format compatible with the [`~transformers.TokenClassificationPipeline`].
"""
PIPELINE_KWARGS = {"ignore_labels": []}
def __init__(self, task="token-classification", default_metric_name=None):
super().__init__(task, default_metric_name=default_metric_name)
def predictions_processor(self, predictions: List[List[Dict]], words: List[List[str]], join_by: str):
"""
Transform the pipeline predictions into a list of predicted labels of the same length as the true labels.
Args:
predictions (`List[List[Dict]]`):
List of pipeline predictions, where each token has been labeled.
words (`List[List[str]]`):
Original input data to the pipeline, used to build predicted labels of the same length.
join_by (`str`):
String to use to join two words. In English, it will typically be " ".
Returns:
`dict`: a dictionary holding the predictions
"""
preds = []
# iterate over the data rows
for i, prediction in enumerate(predictions):
pred_processed = []
# get a list of tuples giving the indexes of the start and end character of each word
words_offsets = self.words_to_offsets(words[i], join_by)
token_index = 0
for word_offset in words_offsets:
# for each word, we may keep only the predicted label for the first token, discard the others
while prediction[token_index]["start"] < word_offset[0]:
token_index += 1
if prediction[token_index]["start"] > word_offset[0]: # bad indexing
pred_processed.append("O")
elif prediction[token_index]["start"] == word_offset[0]:
pred_processed.append(prediction[token_index]["entity"])
preds.append(pred_processed)
return {"predictions": preds}
def words_to_offsets(self, words: List[str], join_by: str):
"""
        Convert a list of words to a list of character offsets, where words are joined by `join_by`.
Args:
words (`List[str]`):
List of words to get offsets from.
join_by (`str`):
String to insert between words.
Returns:
            `List[Tuple[int, int]]`: List of (start index, end index) character offsets for each of the words.
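        Example (illustrative):
            `words_to_offsets(["New", "York"], " ")` returns `[(0, 2), (4, 7)]`, the inclusive character
            spans of "New" and "York" in the joined string "New York".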
"""
offsets = []
start = 0
for word in words:
end = start + len(word) - 1
offsets.append((start, end))
start = end + len(join_by) + 1
return offsets
def prepare_data(self, data: Union[str, Dataset], input_column: str, label_column: str, join_by: str):
super().prepare_data(data, input_column, label_column)
if not isinstance(data.features[input_column], Sequence) or not isinstance(
data.features[label_column], Sequence
):
raise ValueError(
"TokenClassificationEvaluator expects the input and label columns to be provided as lists."
)
# If the labels are of type ClassLabel, they are already integers and we have the map stored somewhere.
# Otherwise, we have to get the list of labels manually.
labels_are_int = isinstance(data.features[label_column].feature, ClassLabel)
if labels_are_int:
label_list = data.features[label_column].feature.names # list of string labels
id_to_label = {i: label for i, label in enumerate(label_list)}
references = [[id_to_label[label_id] for label_id in label_ids] for label_ids in data[label_column]]
elif data.features[label_column].feature.dtype.startswith("int"):
raise NotImplementedError(
"References provided as integers, but the reference column is not a Sequence of ClassLabels."
)
else:
            # In the event the labels are not a `Sequence[ClassLabel]`, we already have the labels as strings
# An example is labels as ["PER", "PER", "O", "LOC", "O", "LOC", "O"], e.g. in polyglot_ner dataset
references = data[label_column]
metric_inputs = {"references": references}
data = data.map(lambda x: {input_column: join_by.join(x[input_column])})
pipeline_inputs = DatasetColumn(data, input_column)
return metric_inputs, pipeline_inputs
def prepare_pipeline(
self,
model_or_pipeline: Union[str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel"], # noqa: F821
tokenizer: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
feature_extractor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] = None, # noqa: F821
device: int = None,
):
pipe = super().prepare_pipeline(model_or_pipeline, tokenizer, feature_extractor, device)
# check the pipeline outputs start characters in its predictions
dummy_output = pipe(["2003 New York Gregory"], **self.PIPELINE_KWARGS)
if dummy_output[0][0]["start"] is None:
raise ValueError(
"TokenClassificationEvaluator supports only pipelines giving 'start' index as a pipeline output (got None). "
"Transformers pipelines with a slow tokenizer will raise this error."
)
return pipe
@add_start_docstrings(EVALUTOR_COMPUTE_START_DOCSTRING)
@add_end_docstrings(EVALUATOR_COMPUTE_RETURN_DOCSTRING, TASK_DOCUMENTATION)
def compute(
self,
model_or_pipeline: Union[
str, "Pipeline", Callable, "PreTrainedModel", "TFPreTrainedModel" # noqa: F821
] = None,
data: Union[str, Dataset] = None,
subset: Optional[str] = None,
split: str = None,
metric: Union[str, EvaluationModule] = None,
tokenizer: Optional[Union[str, "PreTrainedTokenizer"]] = None, # noqa: F821
strategy: Literal["simple", "bootstrap"] = "simple",
confidence_level: float = 0.95,
n_resamples: int = 9999,
device: Optional[int] = None,
random_state: Optional[int] = None,
input_column: str = "tokens",
label_column: str = "ner_tags",
join_by: Optional[str] = " ",
) -> Tuple[Dict[str, float], Any]:
"""
input_column (`str`, defaults to `"tokens"`):
The name of the column containing the tokens feature in the dataset specified by `data`.
        label_column (`str`, defaults to `"ner_tags"`):
The name of the column containing the labels in the dataset specified by `data`.
join_by (`str`, *optional*, defaults to `" "`):
            This evaluator supports datasets whose input column is a list of words. This parameter specifies how to join
words to generate a string input. This is especially useful for languages that do not separate words by a space.
"""
result = {}
self.check_for_mismatch_in_device_setup(device, model_or_pipeline)
# Prepare inputs
data = self.load_data(data=data, subset=subset, split=split)
metric_inputs, pipe_inputs = self.prepare_data(
data=data, input_column=input_column, label_column=label_column, join_by=join_by
)
pipe = self.prepare_pipeline(model_or_pipeline=model_or_pipeline, tokenizer=tokenizer, device=device)
metric = self.prepare_metric(metric)
# Compute predictions
predictions, perf_results = self.call_pipeline(pipe, pipe_inputs)
predictions = self.predictions_processor(predictions, data[input_column], join_by)
metric_inputs.update(predictions)
# Compute metrics from references and predictions
metric_results = self.compute_metric(
metric=metric,
metric_inputs=metric_inputs,
strategy=strategy,
confidence_level=confidence_level,
n_resamples=n_resamples,
random_state=random_state,
)
result.update(metric_results)
result.update(perf_results)
return result
| evaluate/src/evaluate/evaluator/token_classification.py/0 | {
"file_path": "evaluate/src/evaluate/evaluator/token_classification.py",
"repo_id": "evaluate",
"token_count": 4676
} | 294 |
import evaluate
from evaluate.utils import launch_gradio_widget
module = evaluate.load("{{ cookiecutter.namespace }}/{{ cookiecutter.module_slug }}")
launch_gradio_widget(module) | evaluate/templates/{{ cookiecutter.module_slug }}/app.py/0 | {
"file_path": "evaluate/templates/{{ cookiecutter.module_slug }}/app.py",
"repo_id": "evaluate",
"token_count": 52
} | 295 |
import os
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from pathlib import Path
from unittest.mock import patch
from evaluate import config
def parse_flag_from_env(key, default=False):
try:
value = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_value = default
else:
# KEY is set, convert it to True or False.
try:
_value = strtobool(value)
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f"If set, {key} must be yes or no.")
return _value
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
def require_beam(test_case):
"""
Decorator marking a test that requires Apache Beam.
These tests are skipped when Apache Beam isn't installed.
"""
    try:
        import apache_beam  # noqa
    except ImportError:
        test_case = unittest.skip("test requires Apache Beam")(test_case)
return test_case
def require_faiss(test_case):
"""
Decorator marking a test that requires Faiss.
These tests are skipped when Faiss isn't installed.
"""
try:
import faiss # noqa
except ImportError:
test_case = unittest.skip("test requires faiss")(test_case)
return test_case
def require_regex(test_case):
"""
Decorator marking a test that requires regex.
These tests are skipped when Regex isn't installed.
"""
try:
import regex # noqa
except ImportError:
test_case = unittest.skip("test requires regex")(test_case)
return test_case
def require_elasticsearch(test_case):
"""
Decorator marking a test that requires ElasticSearch.
These tests are skipped when ElasticSearch isn't installed.
"""
try:
import elasticsearch # noqa
except ImportError:
test_case = unittest.skip("test requires elasticsearch")(test_case)
return test_case
def require_torch(test_case):
"""
Decorator marking a test that requires PyTorch.
These tests are skipped when PyTorch isn't installed.
"""
if not config.TORCH_AVAILABLE:
test_case = unittest.skip("test requires PyTorch")(test_case)
return test_case
def require_tf(test_case):
"""
Decorator marking a test that requires TensorFlow.
These tests are skipped when TensorFlow isn't installed.
"""
if not config.TF_AVAILABLE:
test_case = unittest.skip("test requires TensorFlow")(test_case)
return test_case
def require_jax(test_case):
"""
Decorator marking a test that requires JAX.
These tests are skipped when JAX isn't installed.
"""
if not config.JAX_AVAILABLE:
test_case = unittest.skip("test requires JAX")(test_case)
return test_case
def require_pil(test_case):
"""
Decorator marking a test that requires Pillow.
These tests are skipped when Pillow isn't installed.
"""
if not config.PIL_AVAILABLE:
test_case = unittest.skip("test requires Pillow")(test_case)
return test_case
def require_transformers(test_case):
"""
Decorator marking a test that requires transformers.
These tests are skipped when transformers isn't installed.
"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("test requires transformers")(test_case)
else:
return test_case
def slow(test_case):
"""
Decorator marking a test as slow.
Slow tests are skipped by default. Set the RUN_SLOW environment variable
to a truthy value to run them.
"""
if not _run_slow_tests or _run_slow_tests == 0:
test_case = unittest.skip("test is slow")(test_case)
return test_case
def local(test_case):
"""
Decorator marking a test as local
Local tests are run by default. Set the RUN_LOCAL environment variable
to a falsy value to not run them.
"""
if not _run_local_tests or _run_local_tests == 0:
test_case = unittest.skip("test is local")(test_case)
return test_case
def packaged(test_case):
"""
Decorator marking a test as packaged
Packaged tests are run by default. Set the RUN_PACKAGED environment variable
to a falsy value to not run them.
"""
if not _run_packaged_tests or _run_packaged_tests == 0:
test_case = unittest.skip("test is packaged")(test_case)
return test_case
def remote(test_case):
"""
Decorator marking a test as one that relies on GitHub or the Hugging Face Hub.
    Remote tests are skipped by default. Set the RUN_REMOTE environment variable
    to a truthy value to run them.
"""
if not _run_remote_tests or _run_remote_tests == 0:
test_case = unittest.skip("test requires remote")(test_case)
return test_case
def for_all_test_methods(*decorators):
def decorate(cls):
for name, fn in cls.__dict__.items():
if callable(fn) and name.startswith("test"):
for decorator in decorators:
fn = decorator(fn)
setattr(cls, name, fn)
return cls
return decorate
class RequestWouldHangIndefinitelyError(Exception):
pass
class OfflineSimulationMode(Enum):
CONNECTION_FAILS = 0
CONNECTION_TIMES_OUT = 1
HF_EVALUATE_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
"""
Simulate offline mode.
    There are three offline simulation modes:
CONNECTION_FAILS (default mode): a ConnectionError is raised for each network call.
Connection errors are created by mocking socket.socket
CONNECTION_TIMES_OUT: the connection hangs until it times out.
The default timeout value is low (1e-16) to speed up the tests.
Timeout errors are created by mocking requests.request
HF_EVALUATE_OFFLINE_SET_TO_1: the HF_EVALUATE_OFFLINE environment variable is set to 1.
    This makes the http/ftp calls of the library instantly fail and raise an OfflineModeEnabled error.
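    Example (illustrative usage):
        with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
            ...  # any requests-based call made in this block times out almost immediately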
"""
from requests import request as online_request
def timeout_request(method, url, **kwargs):
# Change the url to an invalid url so that the connection hangs
invalid_url = "https://10.255.255.1"
if kwargs.get("timeout") is None:
raise RequestWouldHangIndefinitelyError(
f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
)
kwargs["timeout"] = timeout
try:
return online_request(method, invalid_url, **kwargs)
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
e.request.url = url
max_retry_error = e.args[0]
max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
e.args = (max_retry_error,)
raise
def offline_socket(*args, **kwargs):
raise OSError("Offline mode is enabled.")
if mode is OfflineSimulationMode.CONNECTION_FAILS:
# inspired from https://stackoverflow.com/a/18601897
with patch("socket.socket", offline_socket):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("requests.request", timeout_request):
with patch("requests.api.request", timeout_request):
yield
elif mode is OfflineSimulationMode.HF_EVALUATE_OFFLINE_SET_TO_1:
with patch("evaluate.config.HF_EVALUATE_OFFLINE", True):
yield
else:
raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
original_working_dir = str(Path().resolve())
with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
try:
os.chdir(tmp_dir)
yield
finally:
os.chdir(original_working_dir)
def is_rng_equal(rng1, rng2):
return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
| evaluate/tests/utils.py/0 | {
"file_path": "evaluate/tests/utils.py",
"repo_id": "evaluate",
"token_count": 3299
} | 296 |
import datetime
import functools
import os
import socket
import traceback
from typing import List
import requests
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
def chime_sender(webhook_url: str, user_mentions: List[str] = []):
"""
Chime sender wrapper: execute func, send a chime notification with the end status
(successfully finished or crashed) at the end. Also send a Chime notification before
executing func.
`webhook_url`: str
The webhook URL to access your chime room.
Visit https://docs.aws.amazon.com/chime/latest/dg/webhooks.html for more details.
`user_mentions`: List[str] (default=[])
        Optional user aliases or full email addresses to notify.
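    Example (illustrative; the webhook URL and alias are placeholders):
        @chime_sender("https://hooks.chime.aws/incomingwebhooks/...", user_mentions=["alias@example.com"])
        def train():
            ...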
"""
dump = {}
def decorator_sender(func):
@functools.wraps(func)
def wrapper_sender(*args, **kwargs):
start_time = datetime.datetime.now()
host_name = socket.gethostname()
func_name = func.__name__
# Handling distributed training edge case.
# In PyTorch, the launch of `torch.distributed.launch` sets up a RANK environment variable for each process.
# This can be used to detect the master process.
# See https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py#L211
# Except for errors, only the master process will send notifications.
if 'RANK' in os.environ:
master_process = (int(os.environ['RANK']) == 0)
host_name += ' - RANK: %s' % os.environ['RANK']
else:
master_process = True
if master_process:
contents = [
'Your training has started 🎬',
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
' '.join(user_mentions)
]
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
try:
value = func(*args, **kwargs)
if master_process:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = [
"Your training is complete 🎉",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'End date: %s' % end_time.strftime(DATE_FORMAT),
'Training duration: %s' % str(elapsed_time)
]
try:
str_value = str(value)
contents.append('Main call returned value: %s' % str_value)
except:
contents.append('Main call returned value: %s' %
"ERROR - Couldn't str the returned value.")
contents.append(' '.join(user_mentions))
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
return value
except Exception as ex:
end_time = datetime.datetime.now()
elapsed_time = end_time - start_time
contents = [
"Your training has crashed ☠️",
'Machine name: %s' % host_name,
'Main call: %s' % func_name,
'Starting date: %s' % start_time.strftime(DATE_FORMAT),
'Crash date: %s' % end_time.strftime(DATE_FORMAT),
'Crashed training duration: %s\n\n' % str(elapsed_time),
"Here's the error:", '%s\n\n' % ex,
"Traceback:", '%s' % traceback.format_exc(),
' '.join(user_mentions)
]
dump['Content'] = '\n'.join(contents)
requests.post(url=webhook_url, json=dump)
raise ex
return wrapper_sender
return decorator_sender
| knockknock/knockknock/chime_sender.py/0 | {
"file_path": "knockknock/knockknock/chime_sender.py",
"repo_id": "knockknock",
"token_count": 2186
} | 297 |
# Video benchmark
## Questions
What is the optimal trade-off between:
- maximizing loading time with random access,
- minimizing memory space on disk,
- maximizing success rate of policies?
How to encode videos?
- How much compression (`-crf`)? Low compression with `0`, normal compression with `20` or extreme with `56`?
- What pixel format to use (`-pix_fmt`)? `yuv444p` or `yuv420p`?
- How many key frames (`-g`)? A key frame every `10` frames?
How to decode videos?
- Which `decoder`? `torchvision`, `torchaudio`, `ffmpegio`, `decord`, or `nvc`?
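For reference, the encoding parameters above map onto ffmpeg flags roughly as in the sketch below. This is only an illustration of how `-crf`, `-pix_fmt` and `-g` fit together; the frame naming pattern, frame rate and paths are assumptions, and the repository's own `encode_video_frames` helper may build the command differently.
```python
import subprocess
from pathlib import Path


def encode_frames(imgs_dir: Path, video_path: Path, fps: int, crf: int = 20, g: int = 2, pix_fmt: str = "yuv444p"):
    """Encode frame_000000.png, frame_000001.png, ... into an mp4 with libx264 (illustrative only)."""
    video_path.parent.mkdir(parents=True, exist_ok=True)
    subprocess.run(
        [
            "ffmpeg",
            "-r", str(fps),                          # input frame rate
            "-i", str(imgs_dir / "frame_%06d.png"),  # assumed image naming pattern
            "-vcodec", "libx264",
            "-g", str(g),                            # distance between key frames
            "-pix_fmt", pix_fmt,                     # yuv444p or yuv420p
            "-crf", str(crf),                        # compression level
            str(video_path),
        ],
        check=True,
    )
```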
## Metrics
**Data compression factor (higher is better)**
`compression_factor` is the ratio of the disk space taken by the original images to the disk space taken by the encoded video. For instance, `compression_factor=4` means that the video takes 4 times less disk space than the original images.
**Loading time factor (higher is better)**
`load_time_factor` is the ratio of the time it takes to load the original images at given timestamps to the time it takes to decode the exact same frames from the video. For instance, `load_time_factor=0.5` means that decoding from video is 2 times slower than loading the original images.
**Average L2 error per pixel (lower is better)**
`avg_per_pixel_l2_error` is the average L2 error between each decoded frame and its corresponding original image, computed over all requested timestamps and divided by the number of pixels in the image so that it remains comparable across image sizes.
**Loss of a pretrained policy (lower is better)** (not available)
`loss_pretrained` is the result of evaluating, with the selected encoding/decoding settings, a policy pretrained on the original images. It is easier to interpret than `avg_per_pixel_l2_error`.
**Success rate after retraining (higher is better)** (not available)
`success_rate` is the result of training and evaluating a policy with the selected encoding/decoding settings. It is the most expensive metric to obtain, but also the most meaningful one.
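To make the first three metrics concrete, they could be computed along the lines of the sketch below. The loader callables and variable names are placeholders, not the benchmark script's actual API.
```python
import time

import numpy as np


def compute_benchmark_metrics(images_nbytes, video_nbytes, load_images_fn, decode_video_fn, timestamps):
    # ratio of original image size on disk to encoded video size (higher is better)
    compression_factor = images_nbytes / video_nbytes

    start = time.monotonic()
    original_frames = load_images_fn(timestamps)  # load the original images at the requested timestamps
    time_images = time.monotonic() - start

    start = time.monotonic()
    decoded_frames = decode_video_fn(timestamps)  # decode the exact same frames from the video
    time_video = time.monotonic() - start

    # ratio of image loading time to video decoding time (higher is better)
    load_time_factor = time_images / time_video

    # average L2 error per pixel between decoded frames and original images (lower is better)
    errors = []
    for original, decoded in zip(original_frames, decoded_frames):
        diff = original.astype(np.float64) - decoded.astype(np.float64)
        errors.append(np.sqrt((diff**2).sum()) / diff.size)  # diff.size counts pixel values (H*W*C)
    avg_per_pixel_l2_error = float(np.mean(errors))

    return compression_factor, load_time_factor, avg_per_pixel_l2_error
```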
## Variables
**Image content**
We don't expect the same optimal settings for a dataset of images from a simulation as for real-world images captured in an apartment, in a factory, outdoors, etc. Hence, we run this benchmark on two datasets: `pusht` (simulation) and `umi` (real-world outdoor).
**Requested timestamps**
In this benchmark, we focus on the loading time of random access, so we are not interested in sequentially loading all frames of a video like in a movie. However, the number of consecutive timestamps requested and their spacing can greatly affect the `load_time_factor`. In fact, decoding a large number of consecutive frames from a video is expected to be faster than loading the same frames from individual images. To reflect our robotics use case, we consider a few settings:
- `1_frame`: 1 frame,
- `2_frames`: 2 consecutive frames (e.g. `[t, t + 1 / fps]`),
- `2_frames_4_space`: 2 frames spaced 4 frames apart (e.g. `[t, t + 4 / fps]`),
- `6_frames`: 6 consecutive frames (e.g. `[t, t + 1 / fps, ..., t + 5 / fps]`).
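Concretely, the timestamps queried for a reference time `t` could be built as follows (a sketch; `fps` and `t` are placeholder values):
```python
fps = 10   # placeholder frame rate
t = 2.0    # placeholder reference timestamp, in seconds

query_timestamps = {
    "1_frame": [t],
    "2_frames": [t, t + 1 / fps],
    "2_frames_4_space": [t, t + 4 / fps],
    "6_frames": [t + i / fps for i in range(6)],
}
```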
**Data augmentations**
We might revisit this benchmark and find better settings if we train our policies with various data augmentations to make them more robust (e.g. robust to color changes, compression, etc.).
## Results
**`decoder`**
| repo_id | decoder | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- |
| lerobot/pusht | <span style="color: #32CD32;">torchvision</span> | 0.166 | 0.0000119 |
| lerobot/pusht | ffmpegio | 0.009 | 0.0001182 |
| lerobot/pusht | torchaudio | 0.138 | 0.0000359 |
| lerobot/umi_cup_in_the_wild | <span style="color: #32CD32;">torchvision</span> | 0.174 | 0.0000174 |
| lerobot/umi_cup_in_the_wild | ffmpegio | 0.010 | 0.0000735 |
| lerobot/umi_cup_in_the_wild | torchaudio | 0.154 | 0.0000340 |
### `1_frame`
**`pix_fmt`**
| repo_id | pix_fmt | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | yuv420p | 3.788 | 0.224 | 0.0000760 |
| lerobot/pusht | yuv444p | 3.646 | 0.185 | 0.0000443 |
| lerobot/umi_cup_in_the_wild | yuv420p | 14.391 | 0.388 | 0.0000469 |
| lerobot/umi_cup_in_the_wild | yuv444p | 14.932 | 0.329 | 0.0000397 |
**`g`**
| repo_id | g | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 1 | 2.543 | 0.204 | 0.0000556 |
| lerobot/pusht | 2 | 3.646 | 0.182 | 0.0000443 |
| lerobot/pusht | 3 | 4.431 | 0.174 | 0.0000450 |
| lerobot/pusht | 4 | 5.103 | 0.163 | 0.0000448 |
| lerobot/pusht | 5 | 5.625 | 0.163 | 0.0000436 |
| lerobot/pusht | 6 | 5.974 | 0.155 | 0.0000427 |
| lerobot/pusht | 10 | 6.814 | 0.130 | 0.0000410 |
| lerobot/pusht | 15 | 7.431 | 0.105 | 0.0000406 |
| lerobot/pusht | 20 | 7.662 | 0.097 | 0.0000400 |
| lerobot/pusht | 40 | 8.163 | 0.061 | 0.0000405 |
| lerobot/pusht | 100 | 8.761 | 0.039 | 0.0000422 |
| lerobot/pusht | None | 8.909 | 0.024 | 0.0000431 |
| lerobot/umi_cup_in_the_wild | 1 | 14.411 | 0.444 | 0.0000601 |
| lerobot/umi_cup_in_the_wild | 2 | 14.932 | 0.345 | 0.0000397 |
| lerobot/umi_cup_in_the_wild | 3 | 20.174 | 0.282 | 0.0000416 |
| lerobot/umi_cup_in_the_wild | 4 | 24.889 | 0.271 | 0.0000415 |
| lerobot/umi_cup_in_the_wild | 5 | 28.825 | 0.260 | 0.0000415 |
| lerobot/umi_cup_in_the_wild | 6 | 31.635 | 0.249 | 0.0000415 |
| lerobot/umi_cup_in_the_wild | 10 | 39.418 | 0.195 | 0.0000399 |
| lerobot/umi_cup_in_the_wild | 15 | 44.577 | 0.169 | 0.0000394 |
| lerobot/umi_cup_in_the_wild | 20 | 47.907 | 0.140 | 0.0000390 |
| lerobot/umi_cup_in_the_wild | 40 | 52.554 | 0.096 | 0.0000384 |
| lerobot/umi_cup_in_the_wild | 100 | 58.241 | 0.046 | 0.0000390 |
| lerobot/umi_cup_in_the_wild | None | 60.530 | 0.022 | 0.0000400 |
**`crf`**
| repo_id | crf | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 0 | 1.699 | 0.175 | 0.0000035 |
| lerobot/pusht | 5 | 1.409 | 0.181 | 0.0000080 |
| lerobot/pusht | 10 | 1.842 | 0.172 | 0.0000123 |
| lerobot/pusht | 15 | 2.322 | 0.187 | 0.0000211 |
| lerobot/pusht | 20 | 3.050 | 0.181 | 0.0000346 |
| lerobot/pusht | None | 3.646 | 0.189 | 0.0000443 |
| lerobot/pusht | 25 | 3.969 | 0.186 | 0.0000521 |
| lerobot/pusht | 30 | 5.687 | 0.184 | 0.0000850 |
| lerobot/pusht | 40 | 10.818 | 0.193 | 0.0001726 |
| lerobot/pusht | 50 | 18.185 | 0.183 | 0.0002606 |
| lerobot/umi_cup_in_the_wild | 0 | 1.918 | 0.165 | 0.0000056 |
| lerobot/umi_cup_in_the_wild | 5 | 3.207 | 0.171 | 0.0000111 |
| lerobot/umi_cup_in_the_wild | 10 | 4.818 | 0.212 | 0.0000153 |
| lerobot/umi_cup_in_the_wild | 15 | 7.329 | 0.261 | 0.0000218 |
| lerobot/umi_cup_in_the_wild | 20 | 11.361 | 0.312 | 0.0000317 |
| lerobot/umi_cup_in_the_wild | None | 14.932 | 0.339 | 0.0000397 |
| lerobot/umi_cup_in_the_wild | 25 | 17.741 | 0.297 | 0.0000452 |
| lerobot/umi_cup_in_the_wild | 30 | 27.983 | 0.406 | 0.0000629 |
| lerobot/umi_cup_in_the_wild | 40 | 82.449 | 0.468 | 0.0001184 |
| lerobot/umi_cup_in_the_wild | 50 | 186.145 | 0.515 | 0.0001879 |
**best**
| repo_id | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- |
| lerobot/pusht | 3.646 | 0.188 | 0.0000443 |
| lerobot/umi_cup_in_the_wild | 14.932 | 0.339 | 0.0000397 |
### `2_frames`
**`pix_fmt`**
| repo_id | pix_fmt | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | yuv420p | 3.788 | 0.314 | 0.0000799 |
| lerobot/pusht | yuv444p | 3.646 | 0.303 | 0.0000496 |
| lerobot/umi_cup_in_the_wild | yuv420p | 14.391 | 0.642 | 0.0000503 |
| lerobot/umi_cup_in_the_wild | yuv444p | 14.932 | 0.529 | 0.0000436 |
**`g`**
| repo_id | g | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 1 | 2.543 | 0.308 | 0.0000599 |
| lerobot/pusht | 2 | 3.646 | 0.279 | 0.0000496 |
| lerobot/pusht | 3 | 4.431 | 0.259 | 0.0000498 |
| lerobot/pusht | 4 | 5.103 | 0.243 | 0.0000501 |
| lerobot/pusht | 5 | 5.625 | 0.235 | 0.0000492 |
| lerobot/pusht | 6 | 5.974 | 0.230 | 0.0000481 |
| lerobot/pusht | 10 | 6.814 | 0.194 | 0.0000468 |
| lerobot/pusht | 15 | 7.431 | 0.152 | 0.0000460 |
| lerobot/pusht | 20 | 7.662 | 0.151 | 0.0000455 |
| lerobot/pusht | 40 | 8.163 | 0.095 | 0.0000454 |
| lerobot/pusht | 100 | 8.761 | 0.062 | 0.0000472 |
| lerobot/pusht | None | 8.909 | 0.037 | 0.0000479 |
| lerobot/umi_cup_in_the_wild | 1 | 14.411 | 0.638 | 0.0000625 |
| lerobot/umi_cup_in_the_wild | 2 | 14.932 | 0.537 | 0.0000436 |
| lerobot/umi_cup_in_the_wild | 3 | 20.174 | 0.493 | 0.0000437 |
| lerobot/umi_cup_in_the_wild | 4 | 24.889 | 0.458 | 0.0000446 |
| lerobot/umi_cup_in_the_wild | 5 | 28.825 | 0.438 | 0.0000445 |
| lerobot/umi_cup_in_the_wild | 6 | 31.635 | 0.424 | 0.0000444 |
| lerobot/umi_cup_in_the_wild | 10 | 39.418 | 0.345 | 0.0000435 |
| lerobot/umi_cup_in_the_wild | 15 | 44.577 | 0.313 | 0.0000417 |
| lerobot/umi_cup_in_the_wild | 20 | 47.907 | 0.264 | 0.0000421 |
| lerobot/umi_cup_in_the_wild | 40 | 52.554 | 0.185 | 0.0000414 |
| lerobot/umi_cup_in_the_wild | 100 | 58.241 | 0.090 | 0.0000420 |
| lerobot/umi_cup_in_the_wild | None | 60.530 | 0.042 | 0.0000424 |
**`crf`**
| repo_id | crf | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 0 | 1.699 | 0.302 | 0.0000097 |
| lerobot/pusht | 5 | 1.409 | 0.287 | 0.0000142 |
| lerobot/pusht | 10 | 1.842 | 0.283 | 0.0000184 |
| lerobot/pusht | 15 | 2.322 | 0.305 | 0.0000268 |
| lerobot/pusht | 20 | 3.050 | 0.285 | 0.0000402 |
| lerobot/pusht | None | 3.646 | 0.285 | 0.0000496 |
| lerobot/pusht | 25 | 3.969 | 0.293 | 0.0000572 |
| lerobot/pusht | 30 | 5.687 | 0.293 | 0.0000893 |
| lerobot/pusht | 40 | 10.818 | 0.319 | 0.0001762 |
| lerobot/pusht | 50 | 18.185 | 0.304 | 0.0002626 |
| lerobot/umi_cup_in_the_wild | 0 | 1.918 | 0.235 | 0.0000112 |
| lerobot/umi_cup_in_the_wild | 5 | 3.207 | 0.261 | 0.0000166 |
| lerobot/umi_cup_in_the_wild | 10 | 4.818 | 0.333 | 0.0000207 |
| lerobot/umi_cup_in_the_wild | 15 | 7.329 | 0.406 | 0.0000267 |
| lerobot/umi_cup_in_the_wild | 20 | 11.361 | 0.489 | 0.0000361 |
| lerobot/umi_cup_in_the_wild | None | 14.932 | 0.537 | 0.0000436 |
| lerobot/umi_cup_in_the_wild | 25 | 17.741 | 0.578 | 0.0000487 |
| lerobot/umi_cup_in_the_wild | 30 | 27.983 | 0.453 | 0.0000655 |
| lerobot/umi_cup_in_the_wild | 40 | 82.449 | 0.767 | 0.0001192 |
| lerobot/umi_cup_in_the_wild | 50 | 186.145 | 0.816 | 0.0001881 |
**best**
| repo_id | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- |
| lerobot/pusht | 3.646 | 0.283 | 0.0000496 |
| lerobot/umi_cup_in_the_wild | 14.932 | 0.543 | 0.0000436 |
### `2_frames_4_space`
**`pix_fmt`**
| repo_id | pix_fmt | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | yuv420p | 3.788 | 0.257 | 0.0000855 |
| lerobot/pusht | yuv444p | 3.646 | 0.261 | 0.0000556 |
| lerobot/umi_cup_in_the_wild | yuv420p | 14.391 | 0.493 | 0.0000476 |
| lerobot/umi_cup_in_the_wild | yuv444p | 14.932 | 0.371 | 0.0000404 |
**`g`**
| repo_id | g | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 1 | 2.543 | 0.226 | 0.0000670 |
| lerobot/pusht | 2 | 3.646 | 0.222 | 0.0000556 |
| lerobot/pusht | 3 | 4.431 | 0.217 | 0.0000567 |
| lerobot/pusht | 4 | 5.103 | 0.204 | 0.0000555 |
| lerobot/pusht | 5 | 5.625 | 0.179 | 0.0000556 |
| lerobot/pusht | 6 | 5.974 | 0.188 | 0.0000544 |
| lerobot/pusht | 10 | 6.814 | 0.160 | 0.0000531 |
| lerobot/pusht | 15 | 7.431 | 0.150 | 0.0000521 |
| lerobot/pusht | 20 | 7.662 | 0.123 | 0.0000519 |
| lerobot/pusht | 40 | 8.163 | 0.092 | 0.0000519 |
| lerobot/pusht | 100 | 8.761 | 0.053 | 0.0000533 |
| lerobot/pusht | None | 8.909 | 0.034 | 0.0000541 |
| lerobot/umi_cup_in_the_wild | 1 | 14.411 | 0.409 | 0.0000607 |
| lerobot/umi_cup_in_the_wild | 2 | 14.932 | 0.381 | 0.0000404 |
| lerobot/umi_cup_in_the_wild | 3 | 20.174 | 0.355 | 0.0000418 |
| lerobot/umi_cup_in_the_wild | 4 | 24.889 | 0.346 | 0.0000425 |
| lerobot/umi_cup_in_the_wild | 5 | 28.825 | 0.354 | 0.0000419 |
| lerobot/umi_cup_in_the_wild | 6 | 31.635 | 0.336 | 0.0000419 |
| lerobot/umi_cup_in_the_wild | 10 | 39.418 | 0.314 | 0.0000402 |
| lerobot/umi_cup_in_the_wild | 15 | 44.577 | 0.269 | 0.0000397 |
| lerobot/umi_cup_in_the_wild | 20 | 47.907 | 0.246 | 0.0000395 |
| lerobot/umi_cup_in_the_wild | 40 | 52.554 | 0.171 | 0.0000390 |
| lerobot/umi_cup_in_the_wild | 100 | 58.241 | 0.091 | 0.0000399 |
| lerobot/umi_cup_in_the_wild | None | 60.530 | 0.043 | 0.0000409 |
**`crf`**
| repo_id | crf | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 0 | 1.699 | 0.212 | 0.0000193 |
| lerobot/pusht | 5 | 1.409 | 0.211 | 0.0000232 |
| lerobot/pusht | 10 | 1.842 | 0.199 | 0.0000270 |
| lerobot/pusht | 15 | 2.322 | 0.198 | 0.0000347 |
| lerobot/pusht | 20 | 3.050 | 0.211 | 0.0000469 |
| lerobot/pusht | None | 3.646 | 0.206 | 0.0000556 |
| lerobot/pusht | 25 | 3.969 | 0.210 | 0.0000626 |
| lerobot/pusht | 30 | 5.687 | 0.223 | 0.0000927 |
| lerobot/pusht | 40 | 10.818 | 0.227 | 0.0001763 |
| lerobot/pusht | 50 | 18.185 | 0.223 | 0.0002625 |
| lerobot/umi_cup_in_the_wild | 0 | 1.918 | 0.147 | 0.0000071 |
| lerobot/umi_cup_in_the_wild | 5 | 3.207 | 0.182 | 0.0000125 |
| lerobot/umi_cup_in_the_wild | 10 | 4.818 | 0.222 | 0.0000166 |
| lerobot/umi_cup_in_the_wild | 15 | 7.329 | 0.270 | 0.0000229 |
| lerobot/umi_cup_in_the_wild | 20 | 11.361 | 0.325 | 0.0000326 |
| lerobot/umi_cup_in_the_wild | None | 14.932 | 0.362 | 0.0000404 |
| lerobot/umi_cup_in_the_wild | 25 | 17.741 | 0.390 | 0.0000459 |
| lerobot/umi_cup_in_the_wild | 30 | 27.983 | 0.437 | 0.0000633 |
| lerobot/umi_cup_in_the_wild | 40 | 82.449 | 0.499 | 0.0001186 |
| lerobot/umi_cup_in_the_wild | 50 | 186.145 | 0.564 | 0.0001879 |
**best**
| repo_id | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- |
| lerobot/pusht | 3.646 | 0.224 | 0.0000556 |
| lerobot/umi_cup_in_the_wild | 14.932 | 0.368 | 0.0000404 |
### `6_frames`
**`pix_fmt`**
| repo_id | pix_fmt | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | yuv420p | 3.788 | 0.660 | 0.0000839 |
| lerobot/pusht | yuv444p | 3.646 | 0.546 | 0.0000542 |
| lerobot/umi_cup_in_the_wild | yuv420p | 14.391 | 1.225 | 0.0000497 |
| lerobot/umi_cup_in_the_wild | yuv444p | 14.932 | 0.908 | 0.0000428 |
**`g`**
| repo_id | g | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 1 | 2.543 | 0.552 | 0.0000646 |
| lerobot/pusht | 2 | 3.646 | 0.534 | 0.0000542 |
| lerobot/pusht | 3 | 4.431 | 0.563 | 0.0000546 |
| lerobot/pusht | 4 | 5.103 | 0.537 | 0.0000545 |
| lerobot/pusht | 5 | 5.625 | 0.477 | 0.0000532 |
| lerobot/pusht | 6 | 5.974 | 0.515 | 0.0000530 |
| lerobot/pusht | 10 | 6.814 | 0.410 | 0.0000512 |
| lerobot/pusht | 15 | 7.431 | 0.405 | 0.0000503 |
| lerobot/pusht | 20 | 7.662 | 0.345 | 0.0000500 |
| lerobot/pusht | 40 | 8.163 | 0.247 | 0.0000496 |
| lerobot/pusht | 100 | 8.761 | 0.147 | 0.0000510 |
| lerobot/pusht | None | 8.909 | 0.100 | 0.0000519 |
| lerobot/umi_cup_in_the_wild | 1 | 14.411 | 0.997 | 0.0000620 |
| lerobot/umi_cup_in_the_wild | 2 | 14.932 | 0.911 | 0.0000428 |
| lerobot/umi_cup_in_the_wild | 3 | 20.174 | 0.869 | 0.0000433 |
| lerobot/umi_cup_in_the_wild | 4 | 24.889 | 0.874 | 0.0000438 |
| lerobot/umi_cup_in_the_wild | 5 | 28.825 | 0.864 | 0.0000439 |
| lerobot/umi_cup_in_the_wild | 6 | 31.635 | 0.834 | 0.0000440 |
| lerobot/umi_cup_in_the_wild | 10 | 39.418 | 0.781 | 0.0000421 |
| lerobot/umi_cup_in_the_wild | 15 | 44.577 | 0.679 | 0.0000411 |
| lerobot/umi_cup_in_the_wild | 20 | 47.907 | 0.652 | 0.0000410 |
| lerobot/umi_cup_in_the_wild | 40 | 52.554 | 0.465 | 0.0000404 |
| lerobot/umi_cup_in_the_wild | 100 | 58.241 | 0.245 | 0.0000413 |
| lerobot/umi_cup_in_the_wild | None | 60.530 | 0.116 | 0.0000417 |
**`crf`**
| repo_id | crf | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- | --- |
| lerobot/pusht | 0 | 1.699 | 0.534 | 0.0000163 |
| lerobot/pusht | 5 | 1.409 | 0.524 | 0.0000205 |
| lerobot/pusht | 10 | 1.842 | 0.510 | 0.0000245 |
| lerobot/pusht | 15 | 2.322 | 0.512 | 0.0000324 |
| lerobot/pusht | 20 | 3.050 | 0.508 | 0.0000452 |
| lerobot/pusht | None | 3.646 | 0.518 | 0.0000542 |
| lerobot/pusht | 25 | 3.969 | 0.534 | 0.0000616 |
| lerobot/pusht | 30 | 5.687 | 0.530 | 0.0000927 |
| lerobot/pusht | 40 | 10.818 | 0.552 | 0.0001777 |
| lerobot/pusht | 50 | 18.185 | 0.564 | 0.0002644 |
| lerobot/umi_cup_in_the_wild | 0 | 1.918 | 0.401 | 0.0000101 |
| lerobot/umi_cup_in_the_wild | 5 | 3.207 | 0.499 | 0.0000156 |
| lerobot/umi_cup_in_the_wild | 10 | 4.818 | 0.599 | 0.0000197 |
| lerobot/umi_cup_in_the_wild | 15 | 7.329 | 0.704 | 0.0000258 |
| lerobot/umi_cup_in_the_wild | 20 | 11.361 | 0.834 | 0.0000352 |
| lerobot/umi_cup_in_the_wild | None | 14.932 | 0.925 | 0.0000428 |
| lerobot/umi_cup_in_the_wild | 25 | 17.741 | 0.978 | 0.0000480 |
| lerobot/umi_cup_in_the_wild | 30 | 27.983 | 1.088 | 0.0000648 |
| lerobot/umi_cup_in_the_wild | 40 | 82.449 | 1.324 | 0.0001190 |
| lerobot/umi_cup_in_the_wild | 50 | 186.145 | 1.436 | 0.0001880 |
**best**
| repo_id | compression_factor | load_time_factor | avg_per_pixel_l2_error |
| --- | --- | --- | --- |
| lerobot/pusht | 3.646 | 0.546 | 0.0000542 |
| lerobot/umi_cup_in_the_wild | 14.932 | 0.934 | 0.0000428 |
| lerobot/lerobot/common/datasets/_video_benchmark/README.md/0 | {
"file_path": "lerobot/lerobot/common/datasets/_video_benchmark/README.md",
"repo_id": "lerobot",
"token_count": 7745
} | 298 |
"""
Contains utilities to process raw data format of HDF5 files like in: https://github.com/tonyzhaozh/act
"""
import re
import shutil
from pathlib import Path
import h5py
import numpy as np
import torch
import tqdm
from datasets import Dataset, Features, Image, Sequence, Value
from PIL import Image as PILImage
from lerobot.common.datasets.push_dataset_to_hub.utils import concatenate_episodes, save_images_concurrently
from lerobot.common.datasets.utils import (
hf_transform_to_torch,
)
from lerobot.common.datasets.video_utils import VideoFrame, encode_video_frames
def get_cameras(hdf5_data):
# ignore depth channel, not currently handled
# TODO(rcadene): add depth
rgb_cameras = [key for key in hdf5_data["/observations/images"].keys() if "depth" not in key] # noqa: SIM118
return rgb_cameras
def check_format(raw_dir) -> bool:
# only frames from simulation are uncompressed
compressed_images = "sim" not in raw_dir.name
hdf5_paths = list(raw_dir.glob("episode_*.hdf5"))
assert len(hdf5_paths) != 0
for hdf5_path in hdf5_paths:
with h5py.File(hdf5_path, "r") as data:
assert "/action" in data
assert "/observations/qpos" in data
assert data["/action"].ndim == 2
assert data["/observations/qpos"].ndim == 2
num_frames = data["/action"].shape[0]
assert num_frames == data["/observations/qpos"].shape[0]
for camera in get_cameras(data):
assert num_frames == data[f"/observations/images/{camera}"].shape[0]
if compressed_images:
assert data[f"/observations/images/{camera}"].ndim == 2
else:
assert data[f"/observations/images/{camera}"].ndim == 4
b, h, w, c = data[f"/observations/images/{camera}"].shape
assert c < h and c < w, f"Expect (h,w,c) image format but ({h=},{w=},{c=}) provided."
def load_from_raw(raw_dir, out_dir, fps, video, debug):
# only frames from simulation are uncompressed
compressed_images = "sim" not in raw_dir.name
hdf5_files = list(raw_dir.glob("*.hdf5"))
ep_dicts = []
episode_data_index = {"from": [], "to": []}
id_from = 0
for ep_path in tqdm.tqdm(hdf5_files, total=len(hdf5_files)):
with h5py.File(ep_path, "r") as ep:
ep_idx = int(re.search(r"episode_(\d+)", ep_path.name).group(1))
num_frames = ep["/action"].shape[0]
# last step of demonstration is considered done
done = torch.zeros(num_frames, dtype=torch.bool)
done[-1] = True
state = torch.from_numpy(ep["/observations/qpos"][:])
action = torch.from_numpy(ep["/action"][:])
ep_dict = {}
for camera in get_cameras(ep):
img_key = f"observation.images.{camera}"
if compressed_images:
import cv2
# load one compressed image after the other in RAM and uncompress
imgs_array = []
for data in ep[f"/observations/images/{camera}"]:
imgs_array.append(cv2.imdecode(data, 1))
imgs_array = np.array(imgs_array)
else:
# load all images in RAM
imgs_array = ep[f"/observations/images/{camera}"][:]
if video:
# save png images in temporary directory
tmp_imgs_dir = out_dir / "tmp_images"
save_images_concurrently(imgs_array, tmp_imgs_dir)
# encode images to a mp4 video
fname = f"{img_key}_episode_{ep_idx:06d}.mp4"
video_path = out_dir / "videos" / fname
encode_video_frames(tmp_imgs_dir, video_path, fps)
# clean temporary images directory
shutil.rmtree(tmp_imgs_dir)
# store the reference to the video frame
ep_dict[img_key] = [
{"path": f"videos/{fname}", "timestamp": i / fps} for i in range(num_frames)
]
else:
ep_dict[img_key] = [PILImage.fromarray(x) for x in imgs_array]
ep_dict["observation.state"] = state
ep_dict["action"] = action
ep_dict["episode_index"] = torch.tensor([ep_idx] * num_frames)
ep_dict["frame_index"] = torch.arange(0, num_frames, 1)
ep_dict["timestamp"] = torch.arange(0, num_frames, 1) / fps
ep_dict["next.done"] = done
# TODO(rcadene): add reward and success by computing them in sim
assert isinstance(ep_idx, int)
ep_dicts.append(ep_dict)
episode_data_index["from"].append(id_from)
episode_data_index["to"].append(id_from + num_frames)
id_from += num_frames
# process first episode only
if debug:
break
data_dict = concatenate_episodes(ep_dicts)
return data_dict, episode_data_index
def to_hf_dataset(data_dict, video) -> Dataset:
features = {}
keys = [key for key in data_dict if "observation.images." in key]
for key in keys:
if video:
features[key] = VideoFrame()
else:
features[key] = Image()
features["observation.state"] = Sequence(
length=data_dict["observation.state"].shape[1], feature=Value(dtype="float32", id=None)
)
features["action"] = Sequence(
length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)
)
features["episode_index"] = Value(dtype="int64", id=None)
features["frame_index"] = Value(dtype="int64", id=None)
features["timestamp"] = Value(dtype="float32", id=None)
features["next.done"] = Value(dtype="bool", id=None)
features["index"] = Value(dtype="int64", id=None)
hf_dataset = Dataset.from_dict(data_dict, features=Features(features))
hf_dataset.set_transform(hf_transform_to_torch)
return hf_dataset
def from_raw_to_lerobot_format(raw_dir: Path, out_dir: Path, fps=None, video=True, debug=False):
# sanity check
check_format(raw_dir)
if fps is None:
fps = 50
    data_dict, episode_data_index = load_from_raw(raw_dir, out_dir, fps, video, debug)
    hf_dataset = to_hf_dataset(data_dict, video)
info = {
"fps": fps,
"video": video,
}
return hf_dataset, episode_data_index, info
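# A minimal usage sketch of the converter above. The directory paths are hypothetical
# placeholders, and fps=50 mirrors the default applied by `from_raw_to_lerobot_format`
# when no value is given.
def _example_convert_raw_aloha():
    raw_dir = Path("data/raw/aloha_sim_insertion_human")  # hypothetical folder of *.hdf5 episodes
    out_dir = Path("data/lerobot/aloha_sim_insertion_human")  # hypothetical output folder
    hf_dataset, episode_data_index, info = from_raw_to_lerobot_format(
        raw_dir, out_dir, fps=50, video=True, debug=False
    )
    # episode_data_index["from"][i] and ["to"][i] delimit the frame range of episode i in
    # hf_dataset; info records the fps and whether frames were encoded to mp4 videos.
    return hf_dataset, episode_data_index, info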
| lerobot/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py/0 | {
"file_path": "lerobot/lerobot/common/datasets/push_dataset_to_hub/aloha_hdf5_format.py",
"repo_id": "lerobot",
"token_count": 3118
} | 299 |
import torch
from torch import Tensor, nn
def create_stats_buffers(
shapes: dict[str, list[int]],
modes: dict[str, str],
stats: dict[str, dict[str, Tensor]] | None = None,
) -> dict[str, nn.ParameterDict]:
"""
Create buffers per modality (e.g. "observation.image", "action") containing their mean, std, min, max
statistics.
Args: (see Normalize and Unnormalize)
Returns:
        dict: A dictionary where keys are modalities and values are `nn.ParameterDict` objects
            containing `nn.Parameter` tensors set to `requires_grad=False`, so they are not updated
            during backpropagation.
"""
stats_buffers = {}
for key, mode in modes.items():
assert mode in ["mean_std", "min_max"]
shape = tuple(shapes[key])
if "image" in key:
# sanity checks
            assert len(shape) == 3, f"number of dimensions of {key} != 3 ({shape=})"
c, h, w = shape
assert c < h and c < w, f"{key} is not channel first ({shape=})"
# override image shape to be invariant to height and width
shape = (c, 1, 1)
# Note: we initialize mean, std, min, max to infinity. They should be overwritten
# downstream by `stats` or `policy.load_state_dict`, as expected. During forward,
# we assert they are not infinity anymore.
buffer = {}
if mode == "mean_std":
mean = torch.ones(shape, dtype=torch.float32) * torch.inf
std = torch.ones(shape, dtype=torch.float32) * torch.inf
buffer = nn.ParameterDict(
{
"mean": nn.Parameter(mean, requires_grad=False),
"std": nn.Parameter(std, requires_grad=False),
}
)
elif mode == "min_max":
min = torch.ones(shape, dtype=torch.float32) * torch.inf
max = torch.ones(shape, dtype=torch.float32) * torch.inf
buffer = nn.ParameterDict(
{
"min": nn.Parameter(min, requires_grad=False),
"max": nn.Parameter(max, requires_grad=False),
}
)
if stats is not None:
# Note: The clone is needed to make sure that the logic in save_pretrained doesn't see duplicated
# tensors anywhere (for example, when we use the same stats for normalization and
# unnormalization). See the logic here
# https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/py_src/safetensors/torch.py#L97.
if mode == "mean_std":
buffer["mean"].data = stats[key]["mean"].clone()
buffer["std"].data = stats[key]["std"].clone()
elif mode == "min_max":
buffer["min"].data = stats[key]["min"].clone()
buffer["max"].data = stats[key]["max"].clone()
stats_buffers[key] = buffer
return stats_buffers
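# A minimal sketch of what `create_stats_buffers` produces. The keys, shapes and modes below are
# made-up examples, not values required by any particular policy.
def _example_stats_buffers():
    shapes = {"observation.image": [3, 96, 96], "action": [2]}
    modes = {"observation.image": "mean_std", "action": "min_max"}
    buffers = create_stats_buffers(shapes, modes)
    # Image stats are stored with shape (3, 1, 1) so they broadcast over height and width, while
    # "action" stats keep their full shape (2,). Every value starts at +inf until it is overwritten
    # by `stats` or by `policy.load_state_dict`.
    assert buffers["observation.image"]["mean"].shape == (3, 1, 1)
    assert buffers["action"]["min"].shape == (2,)
    return buffers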
def _no_stats_error_str(name: str) -> str:
return (
f"`{name}` is infinity. You should either initialize with `stats` as an argument, or use a "
"pretrained model."
)
class Normalize(nn.Module):
"""Normalizes data (e.g. "observation.image") for more stable and faster convergence during training."""
def __init__(
self,
shapes: dict[str, list[int]],
modes: dict[str, str],
stats: dict[str, dict[str, Tensor]] | None = None,
):
"""
Args:
shapes (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
are their shapes (e.g. `[3,96,96]`]). These shapes are used to create the tensor buffer containing
mean, std, min, max statistics. If the provided `shapes` contain keys related to images, the shape
is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
modes (dict): A dictionary where keys are output modalities (e.g. "observation.image") and values
are their normalization modes among:
- "mean_std": subtract the mean and divide by standard deviation.
- "min_max": map to [-1, 1] range.
stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
and values are dictionaries of statistic types and their values (e.g.
                `{"mean": torch.randn(3,1,1), "std": torch.randn(3,1,1)}`). If provided, as expected for
training the model for the first time, these statistics will overwrite the default buffers. If
                not provided, as expected for finetuning or evaluation, the default buffers should be
overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
dataset is not needed to get the stats, since they are already in the policy state_dict.
"""
super().__init__()
self.shapes = shapes
self.modes = modes
self.stats = stats
stats_buffers = create_stats_buffers(shapes, modes, stats)
for key, buffer in stats_buffers.items():
setattr(self, "buffer_" + key.replace(".", "_"), buffer)
# TODO(rcadene): should we remove torch.no_grad?
@torch.no_grad
def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
for key, mode in self.modes.items():
buffer = getattr(self, "buffer_" + key.replace(".", "_"))
if mode == "mean_std":
mean = buffer["mean"]
std = buffer["std"]
assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
assert not torch.isinf(std).any(), _no_stats_error_str("std")
batch[key] = (batch[key] - mean) / (std + 1e-8)
elif mode == "min_max":
min = buffer["min"]
max = buffer["max"]
assert not torch.isinf(min).any(), _no_stats_error_str("min")
assert not torch.isinf(max).any(), _no_stats_error_str("max")
# normalize to [0,1]
batch[key] = (batch[key] - min) / (max - min)
# normalize to [-1, 1]
batch[key] = batch[key] * 2 - 1
else:
raise ValueError(mode)
return batch
class Unnormalize(nn.Module):
"""
Similar to `Normalize` but unnormalizes output data (e.g. `{"action": torch.randn(b,c)}`) in their
original range used by the environment.
"""
def __init__(
self,
shapes: dict[str, list[int]],
modes: dict[str, str],
stats: dict[str, dict[str, Tensor]] | None = None,
):
"""
Args:
shapes (dict): A dictionary where keys are input modalities (e.g. "observation.image") and values
are their shapes (e.g. `[3,96,96]`]). These shapes are used to create the tensor buffer containing
mean, std, min, max statistics. If the provided `shapes` contain keys related to images, the shape
is adjusted to be invariant to height and width, assuming a channel-first (c, h, w) format.
modes (dict): A dictionary where keys are output modalities (e.g. "observation.image") and values
are their normalization modes among:
- "mean_std": subtract the mean and divide by standard deviation.
- "min_max": map to [-1, 1] range.
stats (dict, optional): A dictionary where keys are output modalities (e.g. "observation.image")
and values are dictionaries of statistic types and their values (e.g.
                `{"mean": torch.randn(3,1,1), "std": torch.randn(3,1,1)}`). If provided, as expected for
training the model for the first time, these statistics will overwrite the default buffers. If
                not provided, as expected for finetuning or evaluation, the default buffers should be
overwritten by a call to `policy.load_state_dict(state_dict)`. That way, initializing the
dataset is not needed to get the stats, since they are already in the policy state_dict.
"""
super().__init__()
self.shapes = shapes
self.modes = modes
self.stats = stats
# `self.buffer_observation_state["mean"]` contains `torch.tensor(state_dim)`
stats_buffers = create_stats_buffers(shapes, modes, stats)
for key, buffer in stats_buffers.items():
setattr(self, "buffer_" + key.replace(".", "_"), buffer)
# TODO(rcadene): should we remove torch.no_grad?
@torch.no_grad
def forward(self, batch: dict[str, Tensor]) -> dict[str, Tensor]:
for key, mode in self.modes.items():
buffer = getattr(self, "buffer_" + key.replace(".", "_"))
if mode == "mean_std":
mean = buffer["mean"]
std = buffer["std"]
assert not torch.isinf(mean).any(), _no_stats_error_str("mean")
assert not torch.isinf(std).any(), _no_stats_error_str("std")
batch[key] = batch[key] * std + mean
elif mode == "min_max":
min = buffer["min"]
max = buffer["max"]
assert not torch.isinf(min).any(), _no_stats_error_str("min")
assert not torch.isinf(max).any(), _no_stats_error_str("max")
batch[key] = (batch[key] + 1) / 2
batch[key] = batch[key] * (max - min) + min
else:
raise ValueError(mode)
return batch
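# A minimal round-trip sketch: normalize a batch with hypothetical "min_max" statistics, then map
# it back with `Unnormalize`. The shapes, stats and values are made-up examples.
def _example_normalize_roundtrip() -> bool:
    shapes = {"action": [2]}
    modes = {"action": "min_max"}
    stats = {"action": {"min": torch.zeros(2), "max": torch.full((2,), 10.0)}}
    normalize = Normalize(shapes, modes, stats)
    unnormalize = Unnormalize(shapes, modes, stats)
    batch = {"action": torch.tensor([[2.5, 7.5]])}
    normalized = normalize(dict(batch))
    # "min_max" maps [0, 10] to [-1, 1], so 2.5 -> -0.5 and 7.5 -> 0.5.
    restored = unnormalize(dict(normalized))
    return torch.allclose(restored["action"], torch.tensor([[2.5, 7.5]]))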
| lerobot/lerobot/common/policies/normalize.py/0 | {
"file_path": "lerobot/lerobot/common/policies/normalize.py",
"repo_id": "lerobot",
"token_count": 4299
} | 300 |
"""Evaluate a policy on an environment by running rollouts and computing metrics.
Usage examples:
You want to evaluate a model from the hub (eg: https://huggingface.co/lerobot/diffusion_pusht)
for 10 episodes.
```
python lerobot/scripts/eval.py -p lerobot/diffusion_pusht eval.n_episodes=10
```
OR, you want to evaluate a model checkpoint from the LeRobot training script for 10 episodes.
```
python lerobot/scripts/eval.py \
-p outputs/train/diffusion_pusht/checkpoints/005000 \
eval.n_episodes=10
```
Note that in both examples, the repo/folder should contain at least `config.json`, `config.yaml` and
`model.safetensors`.
Note the formatting for providing the number of episodes. Generally, you may provide any number of arguments
with `qualified.parameter.name=value`. In this case, the parameter eval.n_episodes appears as `n_episodes`
nested under `eval` in the `config.yaml` found at
https://huggingface.co/lerobot/diffusion_pusht/tree/main.
"""
import argparse
import json
import logging
import threading
import time
from copy import deepcopy
from datetime import datetime as dt
from pathlib import Path
from typing import Callable
import einops
import gymnasium as gym
import numpy as np
import torch
from datasets import Dataset, Features, Image, Sequence, Value, concatenate_datasets
from huggingface_hub import snapshot_download
from huggingface_hub.utils._errors import RepositoryNotFoundError
from huggingface_hub.utils._validators import HFValidationError
from PIL import Image as PILImage
from torch import Tensor
from tqdm import trange
from lerobot.common.datasets.factory import make_dataset
from lerobot.common.datasets.utils import hf_transform_to_torch
from lerobot.common.envs.factory import make_env
from lerobot.common.envs.utils import preprocess_observation
from lerobot.common.logger import log_output_dir
from lerobot.common.policies.factory import make_policy
from lerobot.common.policies.policy_protocol import Policy
from lerobot.common.policies.utils import get_device_from_parameters
from lerobot.common.utils.io_utils import write_video
from lerobot.common.utils.utils import get_safe_torch_device, init_hydra_config, init_logging, set_global_seed
def rollout(
env: gym.vector.VectorEnv,
policy: Policy,
seeds: list[int] | None = None,
return_observations: bool = False,
render_callback: Callable[[gym.vector.VectorEnv], None] | None = None,
enable_progbar: bool = False,
) -> dict:
"""Run a batched policy rollout once through a batch of environments.
Note that all environments in the batch are run until the last environment is done. This means some
data will probably need to be discarded (for environments that aren't the first one to be done).
The return dictionary contains:
        (optional) "observation": A dictionary of (batch, sequence + 1, *) tensors mapped to observation
            keys. NOTE that this has an extra sequence element relative to the other keys in the
dictionary. This is because an extra observation is included for after the environment is
terminated or truncated.
"action": A (batch, sequence, action_dim) tensor of actions applied based on the observations (not
including the last observations).
"reward": A (batch, sequence) tensor of rewards received for applying the actions.
"success": A (batch, sequence) tensor of success conditions (the only time this can be True is upon
            environment termination/truncation).
        "done": A (batch, sequence) tensor of **cumulative** done conditions. For any given batch element,
            the first True is followed by Trues all the way to the end. This can be used for masking
extraneous elements from the sequences above.
Args:
env: The batch of environments.
policy: The policy.
seeds: The environments are seeded once at the start of the rollout. If provided, this argument
specifies the seeds for each of the environments.
return_observations: Whether to include all observations in the returned rollout data. Observations
are returned optionally because they typically take more memory to cache. Defaults to False.
render_callback: Optional rendering callback to be used after the environments are reset, and after
every step.
enable_progbar: Enable a progress bar over rollout steps.
Returns:
The dictionary described above.
"""
device = get_device_from_parameters(policy)
# Reset the policy and environments.
policy.reset()
observation, info = env.reset(seed=seeds)
if render_callback is not None:
render_callback(env)
all_observations = []
all_actions = []
all_rewards = []
all_successes = []
all_dones = []
step = 0
# Keep track of which environments are done.
done = np.array([False] * env.num_envs)
max_steps = env.call("_max_episode_steps")[0]
progbar = trange(
max_steps,
desc=f"Running rollout with at most {max_steps} steps",
disable=not enable_progbar,
leave=False,
)
while not np.all(done):
# Numpy array to tensor and changing dictionary keys to LeRobot policy format.
observation = preprocess_observation(observation)
if return_observations:
all_observations.append(deepcopy(observation))
observation = {key: observation[key].to(device, non_blocking=True) for key in observation}
with torch.inference_mode():
action = policy.select_action(observation)
# Convert to CPU / numpy.
action = action.to("cpu").numpy()
assert action.ndim == 2, "Action dimensions should be (batch, action_dim)"
# Apply the next action.
observation, reward, terminated, truncated, info = env.step(action)
if render_callback is not None:
render_callback(env)
# VectorEnv stores is_success in `info["final_info"][env_index]["is_success"]`. "final_info" isn't
        # available if none of the envs finished.
if "final_info" in info:
successes = [info["is_success"] if info is not None else False for info in info["final_info"]]
else:
successes = [False] * env.num_envs
# Keep track of which environments are done so far.
done = terminated | truncated | done
all_actions.append(torch.from_numpy(action))
all_rewards.append(torch.from_numpy(reward))
all_dones.append(torch.from_numpy(done))
all_successes.append(torch.tensor(successes))
step += 1
running_success_rate = (
einops.reduce(torch.stack(all_successes, dim=1), "b n -> b", "any").numpy().mean()
)
progbar.set_postfix({"running_success_rate": f"{running_success_rate.item() * 100:.1f}%"})
progbar.update()
# Track the final observation.
if return_observations:
observation = preprocess_observation(observation)
all_observations.append(deepcopy(observation))
# Stack the sequence along the first dimension so that we have (batch, sequence, *) tensors.
ret = {
"action": torch.stack(all_actions, dim=1),
"reward": torch.stack(all_rewards, dim=1),
"success": torch.stack(all_successes, dim=1),
"done": torch.stack(all_dones, dim=1),
}
if return_observations:
stacked_observations = {}
for key in all_observations[0]:
stacked_observations[key] = torch.stack([obs[key] for obs in all_observations], dim=1)
ret["observation"] = stacked_observations
return ret
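# A small illustrative sketch (with made-up tensors) of masking rollout data past each episode's
# first done step, mirroring how `eval_policy` below consumes the cumulative "done" tensor.
def _example_mask_rollout() -> Tensor:
    rollout_data = {
        "reward": torch.ones(2, 3),
        # cumulative done flags: once True, they stay True until the end of the sequence
        "done": torch.tensor([[True, True, True], [False, False, True]]),
    }
    n_steps = rollout_data["done"].shape[1]
    # argmax returns the first occurrence of the maximum, i.e. the first True per batch element
    done_indices = torch.argmax(rollout_data["done"].to(int), dim=1)  # (batch_size,)
    mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
    # Returns tensor([2., 3.]): the first episode ends at step 0, so only steps 0 and 1 contribute;
    # the second episode ends at the last step, so all three steps contribute.
    return einops.reduce(rollout_data["reward"] * mask, "b n -> b", "sum")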
def eval_policy(
env: gym.vector.VectorEnv,
policy: torch.nn.Module,
n_episodes: int,
max_episodes_rendered: int = 0,
video_dir: Path | None = None,
return_episode_data: bool = False,
start_seed: int | None = None,
enable_progbar: bool = False,
enable_inner_progbar: bool = False,
) -> dict:
"""
Args:
env: The batch of environments.
policy: The policy.
n_episodes: The number of episodes to evaluate.
max_episodes_rendered: Maximum number of episodes to render into videos.
video_dir: Where to save rendered videos.
return_episode_data: Whether to return episode data for online training. Incorporates the data into
the "episodes" key of the returned dictionary.
start_seed: The first seed to use for the first individual rollout. For all subsequent rollouts the
seed is incremented by 1. If not provided, the environments are not manually seeded.
enable_progbar: Enable progress bar over batches.
enable_inner_progbar: Enable progress bar over steps in each batch.
Returns:
Dictionary with metrics and data regarding the rollouts.
"""
start = time.time()
policy.eval()
# Determine how many batched rollouts we need to get n_episodes. Note that if n_episodes is not evenly
# divisible by env.num_envs we end up discarding some data in the last batch.
n_batches = n_episodes // env.num_envs + int((n_episodes % env.num_envs) != 0)
# Keep track of some metrics.
sum_rewards = []
max_rewards = []
all_successes = []
all_seeds = []
threads = [] # for video saving threads
n_episodes_rendered = 0 # for saving the correct number of videos
# Callback for visualization.
def render_frame(env: gym.vector.VectorEnv):
# noqa: B023
if n_episodes_rendered >= max_episodes_rendered:
return
n_to_render_now = min(max_episodes_rendered - n_episodes_rendered, env.num_envs)
if isinstance(env, gym.vector.SyncVectorEnv):
ep_frames.append(np.stack([env.envs[i].render() for i in range(n_to_render_now)])) # noqa: B023
elif isinstance(env, gym.vector.AsyncVectorEnv):
# Here we must render all frames and discard any we don't need.
ep_frames.append(np.stack(env.call("render")[:n_to_render_now]))
if max_episodes_rendered > 0:
video_paths: list[str] = []
if return_episode_data:
episode_data: dict | None = None
progbar = trange(n_batches, desc="Stepping through eval batches", disable=not enable_progbar)
for batch_ix in progbar:
# Cache frames for rendering videos. Each item will be (b, h, w, c), and the list indexes the rollout
# step.
if max_episodes_rendered > 0:
ep_frames: list[np.ndarray] = []
seeds = range(start_seed + (batch_ix * env.num_envs), start_seed + ((batch_ix + 1) * env.num_envs))
rollout_data = rollout(
env,
policy,
seeds=seeds,
return_observations=return_episode_data,
render_callback=render_frame if max_episodes_rendered > 0 else None,
enable_progbar=enable_inner_progbar,
)
# Figure out where in each rollout sequence the first done condition was encountered (results after
# this won't be included).
n_steps = rollout_data["done"].shape[1]
# Note: this relies on a property of argmax: that it returns the first occurrence as a tiebreaker.
        done_indices = torch.argmax(rollout_data["done"].to(int), axis=1)  # (batch_size,)
# Make a mask with shape (batch, n_steps) to mask out rollout data after the first done
# (batch-element-wise). Note the `done_indices + 1` to make sure to keep the data from the done step.
mask = (torch.arange(n_steps) <= einops.repeat(done_indices + 1, "b -> b s", s=n_steps)).int()
# Extend metrics.
batch_sum_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "sum")
sum_rewards.extend(batch_sum_rewards.tolist())
batch_max_rewards = einops.reduce((rollout_data["reward"] * mask), "b n -> b", "max")
max_rewards.extend(batch_max_rewards.tolist())
batch_successes = einops.reduce((rollout_data["success"] * mask), "b n -> b", "any")
all_successes.extend(batch_successes.tolist())
all_seeds.extend(seeds)
if return_episode_data:
this_episode_data = _compile_episode_data(
rollout_data,
done_indices,
start_episode_index=batch_ix * env.num_envs,
start_data_index=(
0 if episode_data is None else (episode_data["episode_data_index"]["to"][-1].item())
),
fps=env.unwrapped.metadata["render_fps"],
)
if episode_data is None:
episode_data = this_episode_data
else:
                # Some sanity checks to make sure we are correctly compiling the data.
assert (
episode_data["hf_dataset"]["episode_index"][-1] + 1
== this_episode_data["hf_dataset"]["episode_index"][0]
)
assert (
episode_data["hf_dataset"]["index"][-1] + 1 == this_episode_data["hf_dataset"]["index"][0]
)
assert torch.equal(
episode_data["episode_data_index"]["to"][-1],
this_episode_data["episode_data_index"]["from"][0],
)
# Concatenate the episode data.
episode_data = {
"hf_dataset": concatenate_datasets(
[episode_data["hf_dataset"], this_episode_data["hf_dataset"]]
),
"episode_data_index": {
k: torch.cat(
[
episode_data["episode_data_index"][k],
this_episode_data["episode_data_index"][k],
]
)
for k in ["from", "to"]
},
}
# Maybe render video for visualization.
if max_episodes_rendered > 0 and len(ep_frames) > 0:
batch_stacked_frames = np.stack(ep_frames, axis=1) # (b, t, *)
for stacked_frames, done_index in zip(
batch_stacked_frames, done_indices.flatten().tolist(), strict=False
):
if n_episodes_rendered >= max_episodes_rendered:
break
video_dir.mkdir(parents=True, exist_ok=True)
video_path = video_dir / f"eval_episode_{n_episodes_rendered}.mp4"
video_paths.append(str(video_path))
thread = threading.Thread(
target=write_video,
args=(
str(video_path),
stacked_frames[: done_index + 1], # + 1 to capture the last observation
env.unwrapped.metadata["render_fps"],
),
)
thread.start()
threads.append(thread)
n_episodes_rendered += 1
progbar.set_postfix(
{"running_success_rate": f"{np.mean(all_successes[:n_episodes]).item() * 100:.1f}%"}
)
# Wait till all video rendering threads are done.
for thread in threads:
thread.join()
# Compile eval info.
info = {
"per_episode": [
{
"episode_ix": i,
"sum_reward": sum_reward,
"max_reward": max_reward,
"success": success,
"seed": seed,
}
for i, (sum_reward, max_reward, success, seed) in enumerate(
zip(
sum_rewards[:n_episodes],
max_rewards[:n_episodes],
all_successes[:n_episodes],
all_seeds[:n_episodes],
strict=True,
)
)
],
"aggregated": {
"avg_sum_reward": float(np.nanmean(sum_rewards[:n_episodes])),
"avg_max_reward": float(np.nanmean(max_rewards[:n_episodes])),
"pc_success": float(np.nanmean(all_successes[:n_episodes]) * 100),
"eval_s": time.time() - start,
"eval_ep_s": (time.time() - start) / n_episodes,
},
}
if return_episode_data:
info["episodes"] = episode_data
if max_episodes_rendered > 0:
info["video_paths"] = video_paths
return info
def _compile_episode_data(
rollout_data: dict, done_indices: Tensor, start_episode_index: int, start_data_index: int, fps: float
) -> dict:
"""Convenience function for `eval_policy(return_episode_data=True)`
Compiles all the rollout data into a Hugging Face dataset.
Similar logic is implemented when datasets are pushed to hub (see: `push_to_hub`).
"""
ep_dicts = []
episode_data_index = {"from": [], "to": []}
total_frames = 0
data_index_from = start_data_index
for ep_ix in range(rollout_data["action"].shape[0]):
num_frames = done_indices[ep_ix].item() + 1 # + 1 to include the first done frame
total_frames += num_frames
# TODO(rcadene): We need to add a missing last frame which is the observation
# of a done state. it is critical to have this frame for tdmpc to predict a "done observation/state"
ep_dict = {
"action": rollout_data["action"][ep_ix, :num_frames],
"episode_index": torch.tensor([start_episode_index + ep_ix] * num_frames),
"frame_index": torch.arange(0, num_frames, 1),
"timestamp": torch.arange(0, num_frames, 1) / fps,
"next.done": rollout_data["done"][ep_ix, :num_frames],
"next.reward": rollout_data["reward"][ep_ix, :num_frames].type(torch.float32),
}
for key in rollout_data["observation"]:
ep_dict[key] = rollout_data["observation"][key][ep_ix][:num_frames]
ep_dicts.append(ep_dict)
episode_data_index["from"].append(data_index_from)
episode_data_index["to"].append(data_index_from + num_frames)
data_index_from += num_frames
data_dict = {}
for key in ep_dicts[0]:
if "image" not in key:
data_dict[key] = torch.cat([x[key] for x in ep_dicts])
else:
if key not in data_dict:
data_dict[key] = []
for ep_dict in ep_dicts:
for img in ep_dict[key]:
# sanity check that images are channel first
c, h, w = img.shape
assert c < h and c < w, f"expect channel first images, but instead {img.shape}"
# sanity check that images are float32 in range [0,1]
assert img.dtype == torch.float32, f"expect torch.float32, but instead {img.dtype=}"
                    assert img.max() <= 1, f"expect pixels lower than or equal to 1, but instead {img.max()=}"
                    assert img.min() >= 0, f"expect pixels greater than or equal to 0, but instead {img.min()=}"
# from float32 in range [0,1] to uint8 in range [0,255]
img *= 255
img = img.type(torch.uint8)
# convert to channel last and numpy as expected by PIL
img = PILImage.fromarray(img.permute(1, 2, 0).numpy())
data_dict[key].append(img)
data_dict["index"] = torch.arange(start_data_index, start_data_index + total_frames, 1)
episode_data_index["from"] = torch.tensor(episode_data_index["from"])
episode_data_index["to"] = torch.tensor(episode_data_index["to"])
# TODO(rcadene): clean this
features = {}
for key in rollout_data["observation"]:
if "image" in key:
features[key] = Image()
else:
features[key] = Sequence(length=data_dict[key].shape[1], feature=Value(dtype="float32", id=None))
features.update(
{
"action": Sequence(length=data_dict["action"].shape[1], feature=Value(dtype="float32", id=None)),
"episode_index": Value(dtype="int64", id=None),
"frame_index": Value(dtype="int64", id=None),
"timestamp": Value(dtype="float32", id=None),
"next.reward": Value(dtype="float32", id=None),
"next.done": Value(dtype="bool", id=None),
#'next.success': Value(dtype='bool', id=None),
"index": Value(dtype="int64", id=None),
}
)
features = Features(features)
hf_dataset = Dataset.from_dict(data_dict, features=features)
hf_dataset.set_transform(hf_transform_to_torch)
return {
"hf_dataset": hf_dataset,
"episode_data_index": episode_data_index,
}
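# A minimal sketch of slicing one episode out of the structure built by `_compile_episode_data`,
# assuming the dataset rows start at data index 0 (i.e. `start_data_index=0`).
def _example_select_episode(episode_data: dict, episode_index: int) -> Dataset:
    start = episode_data["episode_data_index"]["from"][episode_index].item()
    stop = episode_data["episode_data_index"]["to"][episode_index].item()
    # "from"/"to" delimit the frame range of the episode inside the frame-indexed hf_dataset.
    return episode_data["hf_dataset"].select(range(start, stop))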
def eval(
pretrained_policy_path: str | None = None,
hydra_cfg_path: str | None = None,
config_overrides: list[str] | None = None,
):
assert (pretrained_policy_path is None) ^ (hydra_cfg_path is None)
if hydra_cfg_path is None:
hydra_cfg = init_hydra_config(pretrained_policy_path / "config.yaml", config_overrides)
else:
hydra_cfg = init_hydra_config(hydra_cfg_path, config_overrides)
out_dir = (
f"outputs/eval/{dt.now().strftime('%Y-%m-%d/%H-%M-%S')}_{hydra_cfg.env.name}_{hydra_cfg.policy.name}"
)
if out_dir is None:
raise NotImplementedError()
# Check device is available
get_safe_torch_device(hydra_cfg.device, log=True)
torch.backends.cudnn.benchmark = True
torch.backends.cuda.matmul.allow_tf32 = True
set_global_seed(hydra_cfg.seed)
log_output_dir(out_dir)
logging.info("Making environment.")
env = make_env(hydra_cfg)
logging.info("Making policy.")
if hydra_cfg_path is None:
policy = make_policy(hydra_cfg=hydra_cfg, pretrained_policy_name_or_path=pretrained_policy_path)
else:
# Note: We need the dataset stats to pass to the policy's normalization modules.
policy = make_policy(hydra_cfg=hydra_cfg, dataset_stats=make_dataset(hydra_cfg).stats)
policy.eval()
info = eval_policy(
env,
policy,
hydra_cfg.eval.n_episodes,
max_episodes_rendered=10,
video_dir=Path(out_dir) / "eval",
start_seed=hydra_cfg.seed,
enable_progbar=True,
enable_inner_progbar=True,
)
print(info["aggregated"])
# Save info
with open(Path(out_dir) / "eval_info.json", "w") as f:
json.dump(info, f, indent=2)
env.close()
logging.info("End of eval")
if __name__ == "__main__":
init_logging()
parser = argparse.ArgumentParser(
description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-p",
"--pretrained-policy-name-or-path",
help=(
"Either the repo ID of a model hosted on the Hub or a path to a directory containing weights "
"saved using `Policy.save_pretrained`. If not provided, the policy is initialized from scratch "
"(useful for debugging). This argument is mutually exclusive with `--config`."
),
)
group.add_argument(
"--config",
help=(
"Path to a yaml config you want to use for initializing a policy from scratch (useful for "
"debugging). This argument is mutually exclusive with `--pretrained-policy-name-or-path` (`-p`)."
),
)
parser.add_argument("--revision", help="Optionally provide the Hugging Face Hub revision ID.")
parser.add_argument(
"overrides",
nargs="*",
        help="Any key=value arguments to override config values (use dots for nested overrides, e.g. eval.n_episodes=10)",
)
args = parser.parse_args()
if args.pretrained_policy_name_or_path is None:
eval(hydra_cfg_path=args.config, config_overrides=args.overrides)
else:
try:
pretrained_policy_path = Path(
snapshot_download(args.pretrained_policy_name_or_path, revision=args.revision)
)
except (HFValidationError, RepositoryNotFoundError) as e:
if isinstance(e, HFValidationError):
error_message = (
"The provided pretrained_policy_name_or_path is not a valid Hugging Face Hub repo ID."
)
else:
error_message = (
"The provided pretrained_policy_name_or_path was not found on the Hugging Face Hub."
)
logging.warning(f"{error_message} Treating it as a local directory.")
pretrained_policy_path = Path(args.pretrained_policy_name_or_path)
if not pretrained_policy_path.is_dir() or not pretrained_policy_path.exists():
raise ValueError(
"The provided pretrained_policy_name_or_path is not a valid/existing Hugging Face Hub "
"repo ID, nor is it an existing local directory."
)
eval(pretrained_policy_path=pretrained_policy_path, config_overrides=args.overrides)
| lerobot/lerobot/scripts/eval.py/0 | {
"file_path": "lerobot/lerobot/scripts/eval.py",
"repo_id": "lerobot",
"token_count": 10837
} | 301 |
{
"_data_files": [
{
"filename": "data-00000-of-00001.arrow"
}
],
"_fingerprint": "eb913a2b1a68aa74",
"_format_columns": null,
"_format_kwargs": {},
"_format_type": null,
"_output_all_columns": false,
"_split": null
} | lerobot/tests/data/lerobot/aloha_sim_insertion_human/train/state.json/0 | {
"file_path": "lerobot/tests/data/lerobot/aloha_sim_insertion_human/train/state.json",
"repo_id": "lerobot",
"token_count": 111
} | 302 |
import shutil
from pathlib import Path
import torch
from safetensors.torch import save_file
from lerobot.common.datasets.factory import make_dataset
from lerobot.common.policies.factory import make_policy
from lerobot.common.utils.utils import init_hydra_config, set_global_seed
from lerobot.scripts.train import make_optimizer_and_scheduler
from tests.utils import DEFAULT_CONFIG_PATH
def get_policy_stats(env_name, policy_name, extra_overrides=None):
cfg = init_hydra_config(
DEFAULT_CONFIG_PATH,
overrides=[
f"env={env_name}",
f"policy={policy_name}",
"device=cpu",
]
        + (extra_overrides or []),
)
set_global_seed(1337)
dataset = make_dataset(cfg)
policy = make_policy(cfg, dataset_stats=dataset.stats)
policy.train()
optimizer, _ = make_optimizer_and_scheduler(cfg, policy)
dataloader = torch.utils.data.DataLoader(
dataset,
num_workers=0,
batch_size=cfg.training.batch_size,
shuffle=False,
)
batch = next(iter(dataloader))
output_dict = policy.forward(batch)
output_dict = {k: v for k, v in output_dict.items() if isinstance(v, torch.Tensor)}
loss = output_dict["loss"]
loss.backward()
grad_stats = {}
for key, param in policy.named_parameters():
if param.requires_grad:
grad_stats[f"{key}_mean"] = param.grad.mean()
grad_stats[f"{key}_std"] = (
param.grad.std() if param.grad.numel() > 1 else torch.tensor(float(0.0))
)
optimizer.step()
param_stats = {}
for key, param in policy.named_parameters():
param_stats[f"{key}_mean"] = param.mean()
param_stats[f"{key}_std"] = param.std() if param.numel() > 1 else torch.tensor(float(0.0))
optimizer.zero_grad()
policy.reset()
# HACK: We reload a batch with no delta_timestamps as `select_action` won't expect a timestamps dimension
dataset.delta_timestamps = None
batch = next(iter(dataloader))
obs = {
k: batch[k]
for k in batch
if k in ["observation.image", "observation.images.top", "observation.state"]
}
actions_queue = (
cfg.policy.n_action_steps if "n_action_steps" in cfg.policy else cfg.policy.n_action_repeats
)
actions = {str(i): policy.select_action(obs).contiguous() for i in range(actions_queue)}
return output_dict, grad_stats, param_stats, actions
def save_policy_to_safetensors(output_dir, env_name, policy_name, extra_overrides):
env_policy_dir = Path(output_dir) / f"{env_name}_{policy_name}"
if env_policy_dir.exists():
shutil.rmtree(env_policy_dir)
env_policy_dir.mkdir(parents=True, exist_ok=True)
output_dict, grad_stats, param_stats, actions = get_policy_stats(env_name, policy_name, extra_overrides)
save_file(output_dict, env_policy_dir / "output_dict.safetensors")
save_file(grad_stats, env_policy_dir / "grad_stats.safetensors")
save_file(param_stats, env_policy_dir / "param_stats.safetensors")
save_file(actions, env_policy_dir / "actions.safetensors")
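# A minimal sketch of reading back the artifacts written by `save_policy_to_safetensors`, e.g. for
# a regression test. The directory layout mirrors the one created above.
def _example_load_saved_stats(output_dir, env_name, policy_name):
    from safetensors.torch import load_file

    env_policy_dir = Path(output_dir) / f"{env_name}_{policy_name}"
    grad_stats = load_file(env_policy_dir / "grad_stats.safetensors")
    param_stats = load_file(env_policy_dir / "param_stats.safetensors")
    return grad_stats, param_stats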
if __name__ == "__main__":
# Instructions: include the policies that you want to save artifacts for here. Please make sure to revert
# your changes when you are done.
env_policies = []
for env, policy, extra_overrides in env_policies:
save_policy_to_safetensors("tests/data/save_policy_to_safetensors", env, policy, extra_overrides)
| lerobot/tests/scripts/save_policy_to_safetensor.py/0 | {
"file_path": "lerobot/tests/scripts/save_policy_to_safetensor.py",
"repo_id": "lerobot",
"token_count": 1431
} | 303 |
#ifndef _NPY_INCLUDE_NEIGHBORHOOD_IMP
#error You should not include this header directly
#endif
/*
* Private API (here for inline)
*/
static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter);
/*
* Update to next item of the iterator
*
 * Note: this simply increments the coordinates vector, last dimension
 * incremented first, i.e., for dimension 3
* ...
* -1, -1, -1
* -1, -1, 0
* -1, -1, 1
* ....
* -1, 0, -1
* -1, 0, 0
* ....
* 0, -1, -1
* 0, -1, 0
* ....
*/
#define _UPDATE_COORD_ITER(c) \
wb = iter->coordinates[c] < iter->bounds[c][1]; \
if (wb) { \
iter->coordinates[c] += 1; \
return 0; \
} \
else { \
iter->coordinates[c] = iter->bounds[c][0]; \
}
static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord(PyArrayNeighborhoodIterObject* iter)
{
npy_intp i, wb;
for (i = iter->nd - 1; i >= 0; --i) {
_UPDATE_COORD_ITER(i)
}
return 0;
}
/*
* Version optimized for 2d arrays, manual loop unrolling
*/
static NPY_INLINE int
_PyArrayNeighborhoodIter_IncrCoord2D(PyArrayNeighborhoodIterObject* iter)
{
npy_intp wb;
_UPDATE_COORD_ITER(1)
_UPDATE_COORD_ITER(0)
return 0;
}
#undef _UPDATE_COORD_ITER
/*
* Advance to the next neighbour
*/
static NPY_INLINE int
PyArrayNeighborhoodIter_Next(PyArrayNeighborhoodIterObject* iter)
{
_PyArrayNeighborhoodIter_IncrCoord (iter);
iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
return 0;
}
/*
* Reset functions
*/
static NPY_INLINE int
PyArrayNeighborhoodIter_Reset(PyArrayNeighborhoodIterObject* iter)
{
npy_intp i;
for (i = 0; i < iter->nd; ++i) {
iter->coordinates[i] = iter->bounds[i][0];
}
iter->dataptr = iter->translate((PyArrayIterObject*)iter, iter->coordinates);
return 0;
}
| neuralcoref/include/numpy/_neighborhood_iterator_imp.h/0 | {
"file_path": "neuralcoref/include/numpy/_neighborhood_iterator_imp.h",
"repo_id": "neuralcoref",
"token_count": 786
} | 304 |
/*
* This include file is provided for inclusion in Cython *.pyd files where
* one would like to define the NPY_NO_DEPRECATED_API macro. It can be
* included by
*
* cdef extern from "npy_no_deprecated_api.h": pass
*
*/
#ifndef NPY_NO_DEPRECATED_API
/* put this check here since there may be multiple includes in C extensions. */
#if defined(NDARRAYTYPES_H) || defined(_NPY_DEPRECATED_API_H) || \
defined(OLD_DEFINES_H)
#error "npy_no_deprecated_api.h" must be first among numpy includes.
#else
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#endif
#endif
| neuralcoref/include/numpy/npy_no_deprecated_api.h/0 | {
"file_path": "neuralcoref/include/numpy/npy_no_deprecated_api.h",
"repo_id": "neuralcoref",
"token_count": 203
} | 305 |
package CorScorer;
# Copyright (C) 2009-2011, Emili Sapena esapena <at> lsi.upc.edu
# 2011-2014, Sameer Pradhan <at> childrens.harvard.edu
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. This program is distributed in the hope that
# it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Modified in 2013 for v1.07 by Sebastian Martschat,
# sebastian.martschat <at> h-its.org
#
# Revised in July, 2013 by Xiaoqiang Luo (xql <at> google.com) to create v6.0.
# See comments under $VERSION for modifications.
#
# Revised in March, 2014 by Sameer Pradhan (sameer.pradhan <at> childrens.harvard.edu)
# to implement the BLANC metric for predicted mentions
use strict;
use Algorithm::Munkres;
use Data::Dumper;
#use Algorithm::Combinatorics qw(combinations);
use Math::Combinatorics;
use Cwd;
our $VERSION = '8.01';
print "version: " . $VERSION . " " . Cwd::realpath(__FILE__) . "\n";
##
# 8.01 fixed a bug that crashed the BLANC scoring when duplicate
#      (potentially singleton) mentions were present in the
#      response. As part of the fix, we will allow a maximum of 10
#      duplicate mentions in the response, but if there are more, then it
#      is a sign of a systematic error/manipulation and we will refuse
#      to score that run.
# 8.0 added code to compute the BLANC metric (generalized for both gold
# and system mentions (Luo et al., 2014)
#
# 7.0 Removed code to compute *_cs metrics
#
# 6.0 The directory hosting the scorer is under v6 and internal $VERSION is
# set to "6.0."
# Changes:
# - 'ceafm', 'ceafe' and 'bcub' in the previous version are renamed
# 'ceafm_cs', 'ceafe_cs', and 'bcub_cs', respectively.
# - 'ceafm', 'ceafe' and 'bcub' are implemented without (Cai&Strube 2010)
# modification. These metrics can handle twinless mentions and entities
# just fine.
#
# 1.07 Modifications to implement BCUB and CEAFM
# exactly as proposed by (Cai & Strube, 2010).
# 1.06 ?
# 1.05 Modification of IdentifMentions in order to correctly evaluate the
#      outputs with detected mentions. Based on (Cai & Strube, 2010)
# 1.04 Some output corrections in BLANC functions. Changed package name to "Scorer"
# 1.03 Detects mentions that start in a document but do not end
# 1.02 Corrected BCUB bug. It fails when the key file does not have any mention
# global variables
my $VERBOSE = 0;#2;#3;
my $HEAD_COLUMN = 8;
my $RESPONSE_COLUMN = -1;
my $KEY_COLUMN = -1;
# Score. Scores the results of a coreference resolution system
# Input: Metric, keys file, response file, [name]
# Metric: the metric desired to evaluate:
# muc: MUCScorer (Vilain et al, 1995)
# bcub: B-Cubed (Bagga and Baldwin, 1998)
# ceafm: CEAF (Luo et al, 2005) using mention-based similarity
# ceafe: CEAF (Luo et al, 2005) using entity-based similarity
# keys file: file with expected coreference chains in SemEval format
# response file: file with output of coreference system (SemEval format)
# name: [optional] the name of the document to score. If name is not
# given, all the documents in the dataset will be scored.
#
# Output: an array with numerators and denominators of recall and precision
# (recall_num, recall_den, precision_num, precision_den)
#
# Final scores:
# Recall = recall_num / recall_den
# Precision = precision_num / precision_den
# F1 = 2 * Recall * Precision / (Recall + Precision)
sub Score {
my ($metric, $kFile, $rFile, $name) = @_;
our $repeated_mentions = 0;
if (lc($metric) eq 'blanc') {
return ScoreBLANC($kFile, $rFile, $name);
}
my %idenTotals =
(recallDen => 0, recallNum => 0, precisionDen => 0, precisionNum => 0);
my ($acumNR, $acumDR, $acumNP, $acumDP) = (0, 0, 0, 0);
if (defined($name) && $name ne 'none') {
print "$name:\n" if ($VERBOSE);
my $keys = GetCoreference($kFile, $KEY_COLUMN, $name);
my $response = GetCoreference($rFile, $RESPONSE_COLUMN, $name);
my (
$keyChains, $keyChainsWithSingletonsFromResponse,
$responseChains, $responseChainsWithoutMentionsNotInKey,
$keyChainsOrig, $responseChainsOrig
) = IdentifMentions($keys, $response, \%idenTotals);
($acumNR, $acumDR, $acumNP, $acumDP) = Eval(
$metric, $keyChains,
$keyChainsWithSingletonsFromResponse, $responseChains,
$responseChainsWithoutMentionsNotInKey, $keyChainsOrig,
$responseChainsOrig
);
}
else {
my $kIndexNames = GetFileNames($kFile);
my $rIndexNames = GetFileNames($rFile);
$VERBOSE = 0 if ($name eq 'none');
foreach my $iname (keys(%{$kIndexNames})) {
my $keys =
GetCoreference($kFile, $KEY_COLUMN, $iname, $kIndexNames->{$iname});
my $response = GetCoreference($rFile, $RESPONSE_COLUMN, $iname,
$rIndexNames->{$iname});
print "$iname:\n" if ($VERBOSE);
my (
$keyChains, $keyChainsWithSingletonsFromResponse,
$responseChains, $responseChainsWithoutMentionsNotInKey,
$keyChainsOrig, $responseChainsOrig
) = IdentifMentions($keys, $response, \%idenTotals);
my ($nr, $dr, $np, $dp) = Eval(
$metric, $keyChains,
$keyChainsWithSingletonsFromResponse, $responseChains,
$responseChainsWithoutMentionsNotInKey, $keyChainsOrig,
$responseChainsOrig
);
$acumNR += $nr;
$acumDR += $dr;
$acumNP += $np;
$acumDP += $dp;
}
}
if ($VERBOSE || $name eq 'none') {
print "\n====== TOTALS =======\n";
print "Identification of Mentions: ";
ShowRPF(
$idenTotals{recallNum}, $idenTotals{recallDen},
$idenTotals{precisionNum}, $idenTotals{precisionDen}
);
print "Coreference: ";
ShowRPF($acumNR, $acumDR, $acumNP, $acumDP);
}
return ($acumNR, $acumDR, $acumNP, $acumDP,
$idenTotals{recallNum}, $idenTotals{recallDen},
$idenTotals{precisionNum}, $idenTotals{precisionDen});
}
sub GetIndex {
my ($ind, $i) = @_;
if (!defined($ind->{$i})) {
my $n = $ind->{nexti} || 0;
$ind->{$i} = $n;
$n++;
$ind->{nexti} = $n;
}
return $ind->{$i};
}
# Get the coreference information from column $column of the file $file
# If $name is defined, only keys between "#begin document $name" and
# "#end file $name" are taken.
# The output is an array of entites, where each entity is an array
# of mentions and each mention is an array with two values corresponding
# to the mention's begin and end. For example:
# @entities = ( [ [1,3], [45,45], [57,62] ], # <-- entity 0
# [ [5,5], [25,27], [31,31] ], # <-- entity 1
# ...
# );
# entity 0 is composed of 3 mentions: from token 1 to 3, token 45 and
# from token 57 to 62 (both included)
#
# if $name is not specified, the output is a hash including each file
# found in the document:
# $coref{$file} = \@entities
sub GetCoreference {
my ($file, $column, $name, $pos) = @_;
my %coref;
my %ind;
open(F, $file) || die "Can not open $file: $!";
if ($pos) {
seek(F, $pos, 0);
}
my $fName;
my $getout = 0;
do {
# look for the begin of a file
while (my $l = <F>) {
chomp($l);
$l =~ s/\r$//; # m$ format jokes
if ($l =~ /^\#\s*begin document (.*?)$/) {
if (defined($name)) {
if ($name eq $1) {
$fName = $name;
$getout = 1;
last;
}
}
else {
$fName = $1;
last;
}
}
}
print "====> $fName:\n" if ($VERBOSE > 1);
# Extract the keys from the file until #end is found
my $lnumber = 0;
my @entities;
my @half;
my @head;
my @sentId;
while (my $l = <F>) {
chomp($l);
$l =~ s/^\s+$//;
next if ($l eq '');
if ($l =~ /\#\s*end document/) {
foreach my $h (@half) {
if (defined($h) && @$h) {
die "Error: some mentions in the document do not close\n";
}
}
last;
}
my @columns = split(/\t/, $l);
my $cInfo = $columns[$column];
push(@head, $columns[$HEAD_COLUMN]);
push(@sentId, $columns[0]);
if ($cInfo ne '_') {
#discard double antecedent
while ($cInfo =~ s/\((\d+\+\d)\)//) {
print "Discarded ($1)\n" if ($VERBOSE > 1);
}
# one-token mention(s)
while ($cInfo =~ s/\((\d+)\)//) {
my $ie = GetIndex(\%ind, $1);
push(@{$entities[$ie]}, [$lnumber, $lnumber, $lnumber]);
print "+mention (entity $ie): ($lnumber,$lnumber)\n"
if ($VERBOSE > 2);
}
# begin of mention(s)
while ($cInfo =~ s/\((\d+)//) {
my $ie = GetIndex(\%ind, $1);
push(@{$half[$ie]}, $lnumber);
print "+init mention (entity $ie): ($lnumber\n" if ($VERBOSE > 2);
}
# end of mention(s)
while ($cInfo =~ s/(\d+)\)//) {
my $numberie = $1;
my $ie = GetIndex(\%ind, $numberie);
my $start = pop(@{$half[$ie]});
if (defined($start)) {
my $inim = $sentId[$start];
my $endm = $sentId[$lnumber];
my $tHead = $start;
# the token whose head is outside the mention is the head of the mention
for (my $t = $start ; $t <= $lnumber ; $t++) {
if ($head[$t] < $inim || $head[$t] > $endm) {
$tHead = $t;
last;
}
}
push(@{$entities[$ie]}, [$start, $lnumber, $tHead]);
}
else {
die
"Detected the end of a mention [$numberie]($ie) without begin (?,$lnumber)";
}
print "+mention (entity $ie): ($start,$lnumber)\n" if ($VERBOSE > 2);
}
}
$lnumber++;
}
# verbose
if ($VERBOSE > 1) {
print "File $fName:\n";
for (my $e = 0 ; $e < scalar(@entities) ; $e++) {
print "Entity $e:";
foreach my $mention (@{$entities[$e]}) {
print " ($mention->[0],$mention->[1])";
}
print "\n";
}
}
$coref{$fName} = \@entities;
} while (!$getout && !eof(F));
if (defined($name)) {
return $coref{$name};
}
return \%coref;
}
sub GetFileNames {
my $file = shift;
my %hash;
my $last = 0;
open(F, $file) || die "Can not open $file: $!";
while (my $l = <F>) {
chomp($l);
$l =~ s/\r$//; # m$ format jokes
if ($l =~ /^\#\s*begin document (.*?)$/) {
my $name = $1;
$hash{$name} = $last;
}
$last = tell(F);
}
close(F);
return \%hash;
}
sub IdentifMentions {
my ($keys, $response, $totals) = @_;
my @kChains;
my @kChainsWithSingletonsFromResponse;
my @rChains;
my @rChainsWithoutMentionsNotInKey;
my %id;
my %map;
my $idCount = 0;
my @assigned;
my @kChainsOrig = ();
my @rChainsOrig = ();
# for each mention found in keys an ID is generated
foreach my $entity (@$keys) {
foreach my $mention (@$entity) {
if (defined($id{"$mention->[0],$mention->[1]"})) {
print "Repeated mention in the key: $mention->[0], $mention->[1] ",
$id{"$mention->[0],$mention->[1]"}, $idCount, "\n";
}
$id{"$mention->[0],$mention->[1]"} = $idCount;
$idCount++;
}
}
# correct identification: Exact bound limits
my $exact = 0;
foreach my $entity (@$response) {
my $i = 0;
my @remove;
foreach my $mention (@$entity) {
if (defined($map{"$mention->[0],$mention->[1]"})) {
print "Repeated mention in the response: $mention->[0], $mention->[1] ",
$map{"$mention->[0],$mention->[1]"},
$id{"$mention->[0],$mention->[1]"},
"\n";
push(@remove, $i);
$main::repeated_mentions++;
if ($main::repeated_mentions > 10)
{
print STDERR "Found too many repeated mentions (> 10) in the response, so refusing to score. Please fix the output.\n";
exit 1;
}
}
elsif (defined($id{"$mention->[0],$mention->[1]"})
&& !$assigned[$id{"$mention->[0],$mention->[1]"}])
{
$assigned[$id{"$mention->[0],$mention->[1]"}] = 1;
$map{"$mention->[0],$mention->[1]"} =
$id{"$mention->[0],$mention->[1]"};
$exact++;
}
$i++;
}
# Remove repeated mentions in the response
foreach my $i (sort { $b <=> $a } (@remove)) {
splice(@$entity, $i, 1);
}
}
  # now, let's remove any empty elements in the response array after removing
# potential repeats
my @another_remove = ();
my $ii;
foreach my $eentity (@$response)
{
if ( @$eentity == 0)
{
push(@another_remove, $ii);
}
$ii++;
}
foreach my $iii (sort { $b <=> $a } (@another_remove)) {
splice(@$response, $iii, 1);
}
  # Partial identification: Inside bounds and including the head
my $part = 0;
# Each mention in response not included in keys has a new ID
my $mresp = 0;
foreach my $entity (@$response) {
foreach my $mention (@$entity) {
my $ini = $mention->[0];
my $end = $mention->[1];
if (!defined($map{"$mention->[0],$mention->[1]"})) {
$map{"$mention->[0],$mention->[1]"} = $idCount;
$idCount++;
}
$mresp++;
}
}
if ($VERBOSE) {
print "Total key mentions: " . scalar(keys(%id)) . "\n";
print "Total response mentions: " . scalar(keys(%map)) . "\n";
print "Strictly correct identified mentions: $exact\n";
print "Partially correct identified mentions: $part\n";
print "No identified: " . (scalar(keys(%id)) - $exact - $part) . "\n";
print "Invented: " . ($idCount - scalar(keys(%id))) . "\n";
}
if (defined($totals)) {
$totals->{recallDen} += scalar(keys(%id));
$totals->{recallNum} += $exact;
$totals->{precisionDen} += scalar(keys(%map));
$totals->{precisionNum} += $exact;
$totals->{precisionExact} += $exact;
$totals->{precisionPart} += $part;
}
  # The coreference chain arrays are generated again with the IDs of mentions
  # instead of token coordinates
my $e = 0;
foreach my $entity (@$keys) {
foreach my $mention (@$entity) {
push(@{$kChainsOrig[$e]}, $id{"$mention->[0],$mention->[1]"});
push(@{$kChains[$e]}, $id{"$mention->[0],$mention->[1]"});
}
$e++;
}
$e = 0;
foreach my $entity (@$response) {
foreach my $mention (@$entity) {
push(@{$rChainsOrig[$e]}, $map{"$mention->[0],$mention->[1]"});
push(@{$rChains[$e]}, $map{"$mention->[0],$mention->[1]"});
}
$e++;
}
# In order to use the metrics as in (Cai & Strube, 2010):
# 1. Include the non-detected key mentions into the response as singletons
# 2. Discard the detected mentions not included in key resolved as singletons
# 3a. For computing precision: put twinless system mentions in key
# 3b. For computing recall: discard twinless system mentions in response
my $kIndex = Indexa(\@kChains);
my $rIndex = Indexa(\@rChains);
# 1. Include the non-detected key mentions into the response as singletons
my $addkey = 0;
if (scalar(keys(%id)) - $exact - $part > 0) {
foreach my $kc (@kChains) {
foreach my $m (@$kc) {
if (!defined($rIndex->{$m})) {
push(@rChains, [$m]);
$addkey++;
}
}
}
}
@kChainsWithSingletonsFromResponse = @kChains;
@rChainsWithoutMentionsNotInKey = [];
# 2. Discard the detected mentions not included in key resolved as singletons
my $delsin = 0;
if ($idCount - scalar(keys(%id)) > 0) {
foreach my $rc (@rChains) {
if (scalar(@$rc) == 1) {
if (!defined($kIndex->{$rc->[0]})) {
@$rc = ();
$delsin++;
}
}
}
}
# 3a. For computing precision: put twinless system mentions in key as singletons
my $addinv = 0;
if ($idCount - scalar(keys(%id)) > 0) {
foreach my $rc (@rChains) {
if (scalar(@$rc) > 1) {
foreach my $m (@$rc) {
if (!defined($kIndex->{$m})) {
push(@kChainsWithSingletonsFromResponse, [$m]);
$addinv++;
}
}
}
}
}
# 3b. For computing recall: discard twinless system mentions in response
my $delsys = 0;
foreach my $rc (@rChains) {
my @temprc;
my $i = 0;
foreach my $m (@$rc) {
if (defined($kIndex->{$m})) {
push(@temprc, $m);
$i++;
}
else {
$delsys++;
}
}
if ($i > 0) {
push(@rChainsWithoutMentionsNotInKey, \@temprc);
}
}
# We clean the empty chains
my @newrc;
foreach my $rc (@rChains) {
if (scalar(@$rc) > 0) {
push(@newrc, $rc);
}
}
@rChains = @newrc;
return (
\@kChains, \@kChainsWithSingletonsFromResponse,
\@rChains, \@rChainsWithoutMentionsNotInKey,
\@kChainsOrig, \@rChainsOrig
);
}
sub Eval {
my ($scorer, $keys, $keysPrecision, $response, $responseRecall,
$keyChainsOrig, $responseChainsOrig)
= @_;
$scorer = lc($scorer);
my ($nr, $dr, $np, $dp);
if ($scorer eq 'muc') {
($nr, $dr, $np, $dp) =
MUCScorer($keys, $keysPrecision, $response, $responseRecall);
}
elsif ($scorer eq 'bcub') {
($nr, $dr, $np, $dp) = BCUBED($keyChainsOrig, $responseChainsOrig);
}
elsif ($scorer eq 'ceafm') {
($nr, $dr, $np, $dp) = CEAF($keyChainsOrig, $responseChainsOrig, 1);
}
elsif ($scorer eq 'ceafe') {
($nr, $dr, $np, $dp) = CEAF($keyChainsOrig, $responseChainsOrig, 0);
}
else {
die "Metric $scorer not implemented yet\n";
}
return ($nr, $dr, $np, $dp);
}
# Indexes an array of arrays, in order to easily know the position of an element
sub Indexa {
my ($arrays) = @_;
my %index;
for (my $i = 0 ; $i < @$arrays ; $i++) {
foreach my $e (@{$arrays->[$i]}) {
$index{$e} = $i;
}
}
return \%index;
}
# Consider the "links" within every coreference chain. For example,
# chain A-B-C-D has 3 links: A-B, B-C and C-D.
# Recall: num correct links / num expected links.
# Precision: num correct links / num output links
sub MUCScorer {
my ($keys, $keysPrecision, $response, $responseRecall) = @_;
my $kIndex = Indexa($keys);
# Calculate correct links
my $correct = 0;
foreach my $rEntity (@$response) {
next if (!defined($rEntity));
# for each possible pair
for (my $i = 0 ; $i < @$rEntity ; $i++) {
my $id_i = $rEntity->[$i];
for (my $j = $i + 1 ; $j < @$rEntity ; $j++) {
my $id_j = $rEntity->[$j];
if ( defined($kIndex->{$id_i})
&& defined($kIndex->{$id_j})
&& $kIndex->{$id_i} == $kIndex->{$id_j})
{
$correct++;
last;
}
}
}
}
# Links in key
my $keylinks = 0;
foreach my $kEntity (@$keys) {
next if (!defined($kEntity));
$keylinks += scalar(@$kEntity) - 1 if (scalar(@$kEntity));
}
# Links in response
my $reslinks = 0;
foreach my $rEntity (@$response) {
next if (!defined($rEntity));
$reslinks += scalar(@$rEntity) - 1 if (scalar(@$rEntity));
}
ShowRPF($correct, $keylinks, $correct, $reslinks) if ($VERBOSE);
return ($correct, $keylinks, $correct, $reslinks);
}
# Compute precision for every mention in the response, and compute
# recall for every mention in the keys
sub BCUBED {
my ($keys, $response) = @_;
my $kIndex = Indexa($keys);
my $rIndex = Indexa($response);
my $acumP = 0;
my $acumR = 0;
foreach my $rChain (@$response) {
foreach my $m (@$rChain) {
my $kChain = (defined($kIndex->{$m})) ? $keys->[$kIndex->{$m}] : [];
my $ci = 0;
my $ri = scalar(@$rChain);
my $ki = scalar(@$kChain);
# common mentions in rChain and kChain => Ci
foreach my $mr (@$rChain) {
foreach my $mk (@$kChain) {
if ($mr == $mk) {
$ci++;
last;
}
}
}
$acumP += $ci / $ri if ($ri);
$acumR += $ci / $ki if ($ki);
}
}
# Mentions in key
my $keymentions = 0;
foreach my $kEntity (@$keys) {
$keymentions += scalar(@$kEntity);
}
# Mentions in response
my $resmentions = 0;
foreach my $rEntity (@$response) {
$resmentions += scalar(@$rEntity);
}
ShowRPF($acumR, $keymentions, $acumP, $resmentions) if ($VERBOSE);
return ($acumR, $keymentions, $acumP, $resmentions);
}
# type = 0: Entity-based
# type = 1: Mention-based
sub CEAF {
my ($keys, $response, $type) = @_;
my @sim;
for (my $i = 0 ; $i < scalar(@$keys) ; $i++) {
for (my $j = 0 ; $j < scalar(@$response) ; $j++) {
if (defined($keys->[$i]) && defined($response->[$j])) {
if ($type == 0) { # entity-based
$sim[$i][$j] = 1 - SIMEntityBased($keys->[$i], $response->[$j]);
# 1 - X => the library searches minima not maxima
}
elsif ($type == 1) { # mention-based
$sim[$i][$j] = 1 - SIMMentionBased($keys->[$i], $response->[$j]);
}
}
else {
$sim[$i][$j] = 1;
}
}
# fill the matrix when response chains are less than key ones
for (my $j = scalar(@$response) ; $j < scalar(@$keys) ; $j++) {
$sim[$i][$j] = 1;
}
#$denrec += SIMEntityBased($kChain->[$i], $kChain->[$i]);
}
my @out;
# Munkres algorithm
assign(\@sim, \@out);
my $numerador = 0;
my $denpre = 0;
my $denrec = 0;
# entity-based
if ($type == 0) {
foreach my $c (@$response) {
$denpre++ if (defined($c) && scalar(@$c) > 0);
}
foreach my $c (@$keys) {
$denrec++ if (defined($c) && scalar(@$c) > 0);
}
}
# mention-based
elsif ($type == 1) {
foreach my $c (@$response) {
$denpre += scalar(@$c) if (defined($c));
}
foreach my $c (@$keys) {
$denrec += scalar(@$c) if (defined($c));
}
}
for (my $i = 0 ; $i < scalar(@$keys) ; $i++) {
$numerador += 1 - $sim[$i][$out[$i]];
}
ShowRPF($numerador, $denrec, $numerador, $denpre) if ($VERBOSE);
return ($numerador, $denrec, $numerador, $denpre);
}
sub SIMEntityBased {
my ($a, $b) = @_;
my $intersection = 0;
# Common elements in A and B
foreach my $ma (@$a) {
next if (!defined($ma));
foreach my $mb (@$b) {
next if (!defined($mb));
if ($ma == $mb) {
$intersection++;
last;
}
}
}
my $r = 0;
my $d = scalar(@$a) + scalar(@$b);
if ($d != 0) {
$r = 2 * $intersection / $d;
}
return $r;
}
sub SIMMentionBased {
my ($a, $b) = @_;
my $intersection = 0;
# Common elements in A and B
foreach my $ma (@$a) {
next if (!defined($ma));
foreach my $mb (@$b) {
next if (!defined($mb));
if ($ma == $mb) {
$intersection++;
last;
}
}
}
return $intersection;
}
sub ShowRPF {
my ($numrec, $denrec, $numpre, $denpre, $f1) = @_;
my $precisio = $denpre ? $numpre / $denpre : 0;
my $recall = $denrec ? $numrec / $denrec : 0;
if (!defined($f1)) {
$f1 = 0;
if ($recall + $precisio) {
$f1 = 2 * $precisio * $recall / ($precisio + $recall);
}
}
print "Recall: ($numrec / $denrec) " . int($recall * 10000) / 100 . '%';
print "\tPrecision: ($numpre / $denpre) "
. int($precisio * 10000) / 100 . '%';
print "\tF1: " . int($f1 * 10000) / 100 . "\%\n";
print
"--------------------------------------------------------------------------\n";
}
# NEW
sub ScoreBLANC {
my ($kFile, $rFile, $name) = @_;
my ($acumNRa, $acumDRa, $acumNPa, $acumDPa) = (0, 0, 0, 0);
my ($acumNRr, $acumDRr, $acumNPr, $acumDPr) = (0, 0, 0, 0);
my %idenTotals =
(recallDen => 0, recallNum => 0, precisionDen => 0, precisionNum => 0);
if (defined($name) && $name ne 'none') {
print "$name:\n" if ($VERBOSE);
my $keys = GetCoreference($kFile, $KEY_COLUMN, $name);
my $response = GetCoreference($rFile, $RESPONSE_COLUMN, $name);
my (
$keyChains, $keyChainsWithSingletonsFromResponse,
$responseChains, $responseChainsWithoutMentionsNotInKey,
$keyChainsOrig, $responseChainsOrig
) = IdentifMentions($keys, $response, \%idenTotals);
(
$acumNRa, $acumDRa, $acumNPa, $acumDPa,
$acumNRr, $acumDRr, $acumNPr, $acumDPr
) = BLANC_Internal($keyChainsOrig, $responseChainsOrig);
}
else {
my $kIndexNames = GetFileNames($kFile);
my $rIndexNames = GetFileNames($rFile);
$VERBOSE = 0 if ($name eq 'none');
foreach my $iname (keys(%{$kIndexNames})) {
my $keys =
GetCoreference($kFile, $KEY_COLUMN, $iname, $kIndexNames->{$iname});
my $response = GetCoreference($rFile, $RESPONSE_COLUMN, $iname,
$rIndexNames->{$iname});
print "$name:\n" if ($VERBOSE);
my (
$keyChains, $keyChainsWithSingletonsFromResponse,
$responseChains, $responseChainsWithoutMentionsNotInKey,
$keyChainsOrig, $responseChainsOrig
) = IdentifMentions($keys, $response, \%idenTotals);
my ($nra, $dra, $npa, $dpa, $nrr, $drr, $npr, $dpr) =
BLANC_Internal($keyChainsOrig, $responseChainsOrig);
$acumNRa += $nra;
$acumDRa += $dra;
$acumNPa += $npa;
$acumDPa += $dpa;
$acumNRr += $nrr;
$acumDRr += $drr;
$acumNPr += $npr;
$acumDPr += $dpr;
}
}
if ($VERBOSE || $name eq 'none') {
print "\n====== TOTALS =======\n";
print "Identification of Mentions: ";
ShowRPF(
$idenTotals{recallNum}, $idenTotals{recallDen},
$idenTotals{precisionNum}, $idenTotals{precisionDen}
);
print "\nCoreference:\n";
print "Coreference links: ";
ShowRPF($acumNRa, $acumDRa, $acumNPa, $acumDPa);
print "Non-coreference links: ";
ShowRPF($acumNRr, $acumDRr, $acumNPr, $acumDPr);
print "BLANC: ";
my $Ra = ($acumDRa) ? $acumNRa / $acumDRa : -1;
my $Rr = ($acumDRr) ? $acumNRr / $acumDRr : -1;
my $Pa = ($acumDPa) ? $acumNPa / $acumDPa : 0;
my $Pr = ($acumDPr) ? $acumNPr / $acumDPr : 0;
my $R = ($Ra + $Rr) / 2;
my $P = ($Pa + $Pr) / 2;
my $Fa = ($Pa + $Ra) ? 2 * $Pa * $Ra / ($Pa + $Ra) : 0;
my $Fr = ($Pr + $Rr) ? 2 * $Pr * $Rr / ($Pr + $Rr) : 0;
my $f1 = ($Fa + $Fr) / 2;
if ($Ra == -1 && $Rr == -1) {
$R = 0;
$P = 0;
$f1 = 0;
}
elsif ($Ra == -1) {
$R = $Rr;
$P = $Pr;
$f1 = $Fr;
}
elsif ($Rr == -1) {
$R = $Ra;
$P = $Pa;
$f1 = $Fa;
}
ShowRPF($R, 1, $P, 1, $f1);
}
return (
$acumNRa, $acumDRa, $acumNPa, $acumDPa,
$acumNRr, $acumDRr, $acumNPr, $acumDPr
);
}
sub cartesian {
my @C = map { [$_] } @{shift @_};
foreach (@_) {
my @A = @$_;
@C = map {
my $n = $_;
map { [$n, @$_] } @C
} @A;
}
return @C;
}
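# Example: cartesian([1, 2], [3, 4]) yields ([3,1], [3,2], [4,1], [4,2]),
# i.e. every cross-list pair; the pair order is normalised afterwards by the
# numeric sort in BLANC_Internal.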
sub BLANC_Internal {
my ($keys, $response) = @_;
my ($ga, $gr, $ba, $br) = (0, 0, 0, 0);
my $key_coreference_links = {};
my $key_non_coreference_links = {};
my $response_coreference_links = {};
my $response_non_coreference_links = {};
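  # BLANC scores two link types separately and averages their R/P/F:
  # coreference links are all mention pairs inside one chain, while
  # non-coreference links are all mention pairs taken from two different
  # chains. The four hashes above collect those pairs (as "m1-m2" keys)
  # for the key and the response.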
print "list containing list of chains in key:\n" if ($VERBOSE > 2);
print Dumper $keys if ($VERBOSE > 2);
print "each key chain printed individually:\n" if ($VERBOSE > 2);
if ($VERBOSE > 2) {
foreach my $z (@$keys) {
print Dumper $z;
}
}
print "list containing list of chains in response:\n" if ($VERBOSE > 2);
print Dumper $response if ($VERBOSE > 2);
print "each response chain printed individually:\n" if ($VERBOSE > 2);
if ($VERBOSE > 2) {
foreach my $z (@$response) {
print Dumper $z;
}
}
print
"---------------------------------------------------------------------------------"
. "\n"
if ($VERBOSE > 2);
print "combinations of links for each chain in the key:\n" if ($VERBOSE > 2);
for my $kkk (@$keys) {
my $ccombinat = Math::Combinatorics->new(
count => 2,
data => [@$kkk],
);
while (my @zcombo = $ccombinat->next_combination) {
print Dumper [@zcombo] if ($VERBOSE > 2);
my @zzcombo = sort { $a <=> $b } @zcombo;
$key_coreference_links->{$zzcombo[0] . "-" . $zzcombo[1]} = 1;
}
print
"................................................................................\n"
if ($VERBOSE > 2);
}
print Dumper $key_coreference_links if ($VERBOSE > 2);
print
"********************************************************************************\n"
if ($VERBOSE > 2);
print
"---------------------------------------------------------------------------------"
. "\n"
if ($VERBOSE > 2);
print "combinations of links for each chain in the response:\n"
if ($VERBOSE > 2);
for my $rrr (@$response) {
my $ccombinat = Math::Combinatorics->new(
count => 2,
data => [@$rrr],
);
while (my @zcombo = $ccombinat->next_combination) {
print Dumper [@zcombo] if ($VERBOSE > 2);
my @zzcombo = sort { $a <=> $b } @zcombo;
$response_coreference_links->{$zzcombo[0] . "-" . $zzcombo[1]} = 1;
}
print
"................................................................................\n"
if ($VERBOSE > 2);
}
print Dumper $response_coreference_links if ($VERBOSE > 2);
print
"********************************************************************************\n"
if ($VERBOSE > 2);
my $number_chains_in_key = @$keys;
print "number chains in key: " . $number_chains_in_key . "\n"
if ($VERBOSE > 2);
my @s = (0 .. $number_chains_in_key - 1);
my $ss = join(' ', @s);
my @n = split(' ', $ss);
my $combinat = Math::Combinatorics->new(
count => 2,
data => [@n],
);
print "combinations of 2 from: " . join(" ", @n) . "\n" if ($VERBOSE > 2);
print "------------------------" . ("--" x scalar(@n)) . "\n"
if ($VERBOSE > 2);
while (my @combo = $combinat->next_combination) {
my @kcombo = ();
foreach my $comboo (@combo) {
push(@kcombo, @$keys[$comboo]);
}
my $lkcombo = @kcombo;
print "length: " . $lkcombo . "\n" if ($VERBOSE > 2);
print "kcombo:\n" if ($VERBOSE > 2);
print "+++++\n" if ($VERBOSE > 2);
print Dumper [@kcombo] if ($VERBOSE > 2);
my @kccar = cartesian($kcombo[0], $kcombo[1]);
foreach my $x (@kccar) {
print "--->>>>>>>>>>>>\n" if ($VERBOSE > 2);
print Dumper $x if ($VERBOSE > 2);
my @y = sort { $a <=> $b } @$x;
print Dumper [@y] if ($VERBOSE > 2);
      $key_non_coreference_links->{$y[0] . "-" . $y[1]} = 1;
}
print Dumper $key_non_coreference_links if ($VERBOSE > 2);
print "" . "\n" if ($VERBOSE > 2);
print ".....\n" if ($VERBOSE > 2);
print "\n" if ($VERBOSE > 2);
}
print "\n" if ($VERBOSE > 2);
my $number_chains_in_response = @$response;
print "number chains in response: " . $number_chains_in_response . "\n"
if ($VERBOSE > 2);
  @s = (0 .. $number_chains_in_response - 1);
  $ss = join(' ', @s);
  @n = split(' ', $ss);
  $combinat = Math::Combinatorics->new(
    count => 2,
    data => [@n],
);
print "combinations of 2 from: " . join(" ", @n) . "\n" if ($VERBOSE > 2);
print "------------------------" . ("--" x scalar(@n)) . "\n"
if ($VERBOSE > 2);
while (my @combo = $combinat->next_combination) {
my @kcombo = ();
foreach my $comboo (@combo) {
push(@kcombo, @$response[$comboo]);
}
my $lkcombo = @kcombo;
print "length: " . $lkcombo . "\n" if ($VERBOSE > 2);
print "kcombo:\n" if ($VERBOSE > 2);
print "+++++\n" if ($VERBOSE > 2);
print Dumper [@kcombo] if ($VERBOSE > 2);
my @kccar = cartesian($kcombo[0], $kcombo[1]);
foreach my $x (@kccar) {
print "--->>>>>>>>>>>>\n" if ($VERBOSE > 2);
print Dumper $x if ($VERBOSE > 2);
my @y = sort { $a <=> $b } @$x;
print Dumper [@y] if ($VERBOSE > 2);
      $response_non_coreference_links->{$y[0] . "-" . $y[1]} = 1;
}
print Dumper $response_non_coreference_links if ($VERBOSE > 2);
print "" . "\n" if ($VERBOSE > 2);
print ".....\n" if ($VERBOSE > 2);
print "\n" if ($VERBOSE > 2);
}
print "\n" if ($VERBOSE > 2);
print
"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
if ($VERBOSE > 2);
print Dumper $key_coreference_links if ($VERBOSE > 2);
print Dumper $response_coreference_links if ($VERBOSE > 2);
print Dumper $key_non_coreference_links if ($VERBOSE > 2);
print Dumper $response_non_coreference_links if ($VERBOSE > 2);
print
"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
if ($VERBOSE > 2);
my @union_cl = my @isect_cl = ();
my %union_cl = my %isect_cl = ();
my @kcl = keys %$key_coreference_links;
my @rcl = keys %$response_coreference_links;
print Dumper @kcl if ($VERBOSE > 2);
print
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
if ($VERBOSE > 2);
print Dumper @rcl if ($VERBOSE > 2);
print
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
if ($VERBOSE > 2);
foreach my $e (@kcl, @rcl) { $union_cl{$e}++ && $isect_cl{$e}++ }
@union_cl = keys %union_cl;
@isect_cl = keys %isect_cl;
print Dumper @isect_cl if ($VERBOSE > 2);
print
"********************************************************************************\n"
if ($VERBOSE > 2);
my @union_ncl = my @isect_ncl = ();
my %union_ncl = my %isect_ncl = ();
my @kncl = keys %$key_non_coreference_links;
my @rncl = keys %$response_non_coreference_links;
print Dumper @kncl if ($VERBOSE > 2);
print
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
if ($VERBOSE > 2);
print Dumper @rncl if ($VERBOSE > 2);
print
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
if ($VERBOSE > 2);
foreach my $e (@kncl, @rncl) { $union_ncl{$e}++ && $isect_ncl{$e}++ }
@union_ncl = keys %union_ncl;
@isect_ncl = keys %isect_ncl;
print Dumper @isect_ncl if ($VERBOSE > 2);
print
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n"
if ($VERBOSE > 2);
my $num_isect_cl = @isect_cl;
print
" number of links in the intersection of key and response coreference links: "
. $num_isect_cl . "\n"
if ($VERBOSE > 2);
my $num_isect_ncl = @isect_ncl;
print
"number of links in the intersection of key and response non-coreference links: "
. $num_isect_ncl . "\n"
if ($VERBOSE > 2);
my $num_key_coreference_links = keys %$key_coreference_links;
print "number of key coreference links: " . $num_key_coreference_links . "\n"
if ($VERBOSE > 2);
my $num_response_coreference_links = keys %$response_coreference_links;
print "number of response coreference links: "
. $num_response_coreference_links . "\n"
if ($VERBOSE > 2);
my $num_key_non_coreference_links = keys %$key_non_coreference_links;
print "number of key non-coreference links: "
. $num_key_non_coreference_links . "\n"
if ($VERBOSE > 2);
my $num_response_non_coreference_links =
keys %$response_non_coreference_links;
print "number of response non-coreference links: "
. $num_response_non_coreference_links . "\n"
if ($VERBOSE > 2);
my ($r_blanc, $p_blanc, $f_blanc) = ComputeBLANCFromCounts(
$num_isect_cl, $num_key_coreference_links,
$num_response_coreference_links, $num_isect_ncl,
$num_key_non_coreference_links, $num_response_non_coreference_links
);
print " blanc recall: " . $r_blanc . "\n" if ($VERBOSE > 2);
print "blanc precision: " . $p_blanc . "\n" if ($VERBOSE > 2);
print " blanc score: " . $f_blanc . "\n" if ($VERBOSE > 2);
print
">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"
if ($VERBOSE > 2);
return (
$num_isect_cl, $num_key_coreference_links,
$num_isect_cl, $num_response_coreference_links,
$num_isect_ncl, $num_key_non_coreference_links,
$num_isect_ncl, $num_response_non_coreference_links
);
}
################################################################################
# Compute BLANC recall, precision and F-measure from counts.
# Parameters:
# (#correct_coref_links, #key_coref_links, #response_coref_links,
# #correct_noncoref_links, #key_noncoref_links, #response_noncoref_links).
# Returns: (recall, precision, F-measure).
################################################################################
sub ComputeBLANCFromCounts {
my (
$num_isect_cl, $num_key_coreference_links,
$num_response_coreference_links, $num_isect_ncl,
$num_key_non_coreference_links, $num_response_non_coreference_links
) = @_;
my $kcl_recall =
($num_key_coreference_links == 0)
? 0
: ($num_isect_cl / $num_key_coreference_links);
my $kcl_precision =
($num_response_coreference_links == 0)
? 0
: ($num_isect_cl / $num_response_coreference_links);
print
"<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n"
if ($VERBOSE > 2);
print " coreference recall: " . $kcl_recall . "\n" if ($VERBOSE > 2);
print " coreference precision: " . $kcl_precision . "\n" if ($VERBOSE > 2);
my $fcl =
($kcl_recall + $kcl_precision == 0)
? 0
: (2 * $kcl_recall * $kcl_precision / ($kcl_recall + $kcl_precision));
print " coreference f-score: " . $fcl . "\n" if ($VERBOSE > 2);
my $kncl_recall =
($num_key_non_coreference_links == 0)
? 0
: ($num_isect_ncl / $num_key_non_coreference_links);
my $kncl_precision =
($num_response_non_coreference_links == 0)
? 0
: ($num_isect_ncl / $num_response_non_coreference_links);
print
"--------------------------------------------------------------------------------\n"
if ($VERBOSE > 2);
print " non-coreference recall: " . $kncl_recall . "\n" if ($VERBOSE > 2);
print "non-coreference precision: " . $kncl_precision . "\n"
if ($VERBOSE > 2);
my $fncl =
($kncl_recall + $kncl_precision == 0)
? 0
: (2 * $kncl_recall * $kncl_precision / ($kncl_recall + $kncl_precision));
print " non-coreference f-score: " . $fncl . "\n" if ($VERBOSE > 2);
print
"--------------------------------------------------------------------------------\n"
if ($VERBOSE > 2);
my $r_blanc = -1;
my $p_blanc = -1;
my $f_blanc = -1;
if ($num_key_coreference_links == 0 && $num_key_non_coreference_links == 0) {
$r_blanc = 0;
$p_blanc = 0;
$f_blanc = 0;
}
elsif ($num_key_coreference_links == 0 || $num_key_non_coreference_links == 0)
{
if ($num_key_coreference_links == 0) {
$r_blanc = $kncl_recall;
$p_blanc = $kncl_precision;
$f_blanc = $fncl;
}
elsif ($num_key_non_coreference_links == 0) {
$r_blanc = $kcl_recall;
$p_blanc = $kcl_precision;
$f_blanc = $fcl;
}
}
else {
$r_blanc = ($kcl_recall + $kncl_recall) / 2;
$p_blanc = ($kcl_precision + $kncl_precision) / 2;
$f_blanc = ($fcl + $fncl) / 2;
}
return ($r_blanc, $p_blanc, $f_blanc);
}
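# Worked example with hypothetical counts: 2 correct coreference links out of
# 4 in the key and 5 in the response, plus 6 correct non-coreference links out
# of 8 in the key and 9 in the response give
#   R_c = 2/4 = 0.50,  P_c = 2/5 = 0.40,  F_c ~= 0.444
#   R_n = 6/8 = 0.75,  P_n = 6/9 ~= 0.667, F_n ~= 0.706
# and therefore BLANC R = 0.625, P ~= 0.533, F ~= 0.575.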
1;
| neuralcoref/neuralcoref/train/scorer/lib/CorScorer.pm/0 | {
"file_path": "neuralcoref/neuralcoref/train/scorer/lib/CorScorer.pm",
"repo_id": "neuralcoref",
"token_count": 17657
} | 306 |
#begin document (nw/xinhua/00/chtb_0009); part 000
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 (1)
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 (1)
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 (1)
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 (1)
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
nw/xinhua/00/chtb_0009 -
#end document
| neuralcoref/neuralcoref/train/scorer/test/DataFiles/TC-G-1.response/0 | {
"file_path": "neuralcoref/neuralcoref/train/scorer/test/DataFiles/TC-G-1.response",
"repo_id": "neuralcoref",
"token_count": 454
} | 307 |
<jupyter_start><jupyter_text>Que peuvent faire les *transformers* ? Installez la bibliothèque 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece]
from transformers import pipeline<jupyter_output><empty_output><jupyter_text>Analyse de sentiments<jupyter_code>classifier = pipeline("sentiment-analysis")
classifier("I've been waiting for a HuggingFace course my whole life.")
classifier = pipeline("sentiment-analysis", model="tblard/tf-allocine")
classifier("J'ai attendu un cours d'HuggingFace toute ma vie.")<jupyter_output><empty_output><jupyter_text>Intéressant ! On observe que le résultat est négatif là où pour la version en anglais le résultat est positif.<jupyter_code>classifier(
["J'ai attendu un cours d'HuggingFace toute ma vie.",
"Je déteste tellement ça !"]
) # pour classifier plusieurs phrases<jupyter_output><empty_output><jupyter_text>La phrase "J'ai attendu un cours d'HuggingFace toute ma vie." qui était précédemment négative devient à présent positive. Zéro shot classification<jupyter_code>classifier = pipeline("zero-shot-classification", model="BaptisteDoyen/camembert-base-xnli")
classifier(
"C'est un cours sur la bibliothèque Transformers",
candidate_labels=["éducation", "politique", "affaires"],
)<jupyter_output><empty_output><jupyter_text>Génération de texte<jupyter_code>generator = pipeline("text-generation", model="asi/gpt-fr-cased-small")
generator("# Dans ce cours, nous vous enseignerons comment")
generator = pipeline("text-generation", model="asi/gpt-fr-cased-small")
generator(
"# Dans ce cours, nous vous enseignerons comment",
max_length=30,
num_return_sequences=1,
)<jupyter_output><empty_output><jupyter_text>Remplacement des mots masqués<jupyter_code>unmasker = pipeline("fill-mask", model="camembert-base")
unmasker(" Ce cours vous apprendra tout sur les modèles <mask>.", top_k=2)<jupyter_output><empty_output><jupyter_text>Reconnaissance d'entités nommées<jupyter_code>ner = pipeline("ner", model="Jean-Baptiste/camembert-ner", grouped_entities=True)
ner("Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.")<jupyter_output><empty_output><jupyter_text>Réponse à des questions<jupyter_code>question_answerer = pipeline("question-answering", model="etalab-ia/camembert-base-squadFR-fquad-piaf")
question_answerer(
question="Où est-ce que je travaille ?",
context="Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.",
)<jupyter_output><empty_output><jupyter_text>Résumé<jupyter_code>summarizer = pipeline("summarization", model="moussaKam/barthez-orangesum-abstract")
summarizer(
"""
L'Amérique a changé de façon spectaculaire au cours des dernières années. Non seulement le nombre de
diplômés dans les disciplines traditionnelles de l'ingénierie telles que le génie mécanique, civil,
l'électricité, la chimie et l'aéronautique a diminué, mais dans la plupart
des grandes universités américaines, les programmes d'études d'ingénierie se concentrent désormais sur
et encouragent largement l'étude des sciences de l'ingénieur. Par conséquent, il y a
de moins en moins d'offres dans les sujets d'ingénierie traitant de l'infrastructure,
l'environnement et les questions connexes, et une plus grande concentration sur les sujets de haute
technologie, qui soutiennent en grande partie des développements scientifiques de plus en plus
complexes. Si cette dernière est importante, elle ne doit pas se faire au détriment
de l'ingénierie plus traditionnelle.
Les économies en développement rapide telles que la Chine et l'Inde, ainsi que d'autres
pays industrialisés d'Europe et d'Asie, continuent d'encourager et de promouvoir
l'enseignement de l'ingénierie. La Chine et l'Inde, respectivement, diplôment
six et huit fois plus d'ingénieurs traditionnels que les États-Unis.
Les autres pays industriels maintiennent au minimum leur production, tandis que l'Amérique
souffre d'une baisse de plus en plus importante du nombre de diplômés en ingénierie
et un manque d'ingénieurs bien formés.
"""
)<jupyter_output><empty_output><jupyter_text>Traduction<jupyter_code>translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-fr")
translator("This course is produced by Hugging Face.")<jupyter_output><empty_output> | notebooks/course/fr/chapter1/section3.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter1/section3.ipynb",
"repo_id": "notebooks",
"token_count": 1607
} | 308 |
<jupyter_start><jupyter_text>Finetuner un modèle avec l'API Trainer Installez les bibliothèques 🤗 Transformers et 🤗 Datasets pour exécuter ce notebook.<jupyter_code>!pip install datasets transformers[sentencepiece]
from datasets import load_dataset
from transformers import AutoTokenizer, DataCollatorWithPadding
raw_datasets = load_dataset("paws-x", "fr")
checkpoint = "camembert-base"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
def tokenize_function(example):
return tokenizer(example["sentence1"], example["sentence2"], truncation=True)
tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
from transformers import TrainingArguments
training_args = TrainingArguments("test-trainer")
from transformers import AutoModelForSequenceClassification
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
from transformers import Trainer
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
)
trainer.train() # Attention, une epoch prend 12h !
predictions = trainer.predict(tokenized_datasets["validation"])
print(predictions.predictions.shape, predictions.label_ids.shape)
import numpy as np
preds = np.argmax(predictions.predictions, axis=-1)
from datasets import load_metric
metric = load_metric("glue", "mrpc")
metric.compute(predictions=preds, references=predictions.label_ids)
def compute_metrics(eval_preds):
metric = load_metric("glue", "mrpc")
logits, labels = eval_preds
predictions = np.argmax(logits, axis=-1)
return metric.compute(predictions=predictions, references=labels)
training_args = TrainingArguments("test-trainer", evaluation_strategy="epoch")
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
trainer = Trainer(
model,
training_args,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["validation"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics,
)<jupyter_output><empty_output> | notebooks/course/fr/chapter3/section3.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter3/section3.ipynb",
"repo_id": "notebooks",
"token_count": 754
} | 309 |
<jupyter_start><jupyter_text>Les pouvoirs spéciaux des *tokenizers* rapides (TensorFlow) Installez les bibliothèques 🤗 *Transformers* et 🤗 *Datasets* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained("camembert-base")
example = "Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn."
encoding = tokenizer(example)
print(type(encoding))
tokenizer.is_fast
encoding.is_fast
encoding.tokens()
encoding.word_ids()
start, end = encoding.word_to_chars(3)
example[start:end]
from transformers import pipeline
token_classifier = pipeline("token-classification", model="Jean-Baptiste/camembert-ner")
token_classifier("Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.")
from transformers import pipeline
token_classifier = pipeline("token-classification", model="Jean-Baptiste/camembert-ner", aggregation_strategy="simple")
token_classifier("Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn.")
from transformers import AutoTokenizer, TFAutoModelForTokenClassification
model_checkpoint = "Jean-Baptiste/camembert-ner"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
model = TFAutoModelForTokenClassification.from_pretrained(model_checkpoint, from_pt=True)
example = "Je m'appelle Sylvain et je travaille à Hugging Face à Brooklyn."
inputs = tokenizer(example, return_tensors="tf")
outputs = model(**inputs)
print(inputs["input_ids"].shape)
print(outputs.logits.shape)
import tensorflow as tf
probabilities = tf.math.softmax(outputs.logits, axis=-1)[0]
probabilities = probabilities.numpy().tolist()
predictions = tf.math.argmax(outputs.logits, axis=-1)[0]
predictions = predictions.numpy().tolist()
print(predictions)
model.config.id2label
results = []
tokens = inputs.tokens()
for idx, pred in enumerate(predictions):
label = model.config.id2label[pred]
if label != "O":
results.append(
{"entity": label, "score": probabilities[idx][pred], "word": tokens[idx]}
)
print(results)
inputs_with_offsets = tokenizer(example, return_offsets_mapping=True)
inputs_with_offsets["offset_mapping"]
example[12:14]
results = []
inputs_with_offsets = tokenizer(example, return_offsets_mapping=True)
tokens = inputs_with_offsets.tokens()
offsets = inputs_with_offsets["offset_mapping"]
for idx, pred in enumerate(predictions):
label = model.config.id2label[pred]
if label != "O":
start, end = offsets[idx]
results.append(
{
"entity": label,
"score": probabilities[idx][pred],
"word": tokens[idx],
"start": start,
"end": end,
}
)
print(results)
example[33:45]
import numpy as np
results = []
inputs_with_offsets = tokenizer(example, return_offsets_mapping=True)
tokens = inputs_with_offsets.tokens()
offsets = inputs_with_offsets["offset_mapping"]
idx = 0
while idx < len(predictions):
pred = predictions[idx]
label = model.config.id2label[pred]
if label != "O":
# Enlevez le B- ou le I-
label = label[2:]
start, _ = offsets[idx]
# Récupérer tous les tokens étiquetés avec I-label
all_scores = []
while (
idx < len(predictions)
and model.config.id2label[predictions[idx]] == f"I-{label}"
):
all_scores.append(probabilities[idx][pred])
_, end = offsets[idx]
idx += 1
# Le score est la moyenne de tous les scores des tokens de cette entité groupée
score = np.mean(all_scores).item()
word = example[start:end]
results.append(
{
"entity_group": label,
"score": score,
"word": word,
"start": start,
"end": end,
}
)
idx += 1
print(results)<jupyter_output><empty_output> | notebooks/course/fr/chapter6/section3_tf.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter6/section3_tf.ipynb",
"repo_id": "notebooks",
"token_count": 1646
} | 310 |
<jupyter_start><jupyter_text>Entraîner un modèle de langage causal de zéro (PyTorch)Ici nous entraînons un modèle à générer du code Python. Le Python utilisant des fonctions basées sur des mots anglais, nous gardons un gpt-2 anglais dans l'optique d'obtenir de meilleures performances que ce que l'on pourrait s'attendre en utilisant un gpt-2 en français. Installez les bibliothèques 🤗 *Datasets* et 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece]
!pip install accelerate
# Pour exécuter l'entraînement sur TPU, vous devez décommenter la ligne suivante :
# !pip install cloud-tpu-client==0.10 torch==1.9.0 https://storage.googleapis.com/tpu-pytorch/wheels/torch_xla-1.9-cp37-cp37m-linux_x86_64.whl
!apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "you@example.com"
!git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login
notebook_login()
def any_keyword_in_string(string, keywords):
for keyword in keywords:
if keyword in string:
return True
return False
filters = ["pandas", "sklearn", "matplotlib", "seaborn"]
example_1 = "import numpy as np"
example_2 = "import pandas as pd"
print(
any_keyword_in_string(example_1, filters), any_keyword_in_string(example_2, filters)
)
from collections import defaultdict
from tqdm import tqdm
from datasets import Dataset
def filter_streaming_dataset(dataset, filters):
filtered_dict = defaultdict(list)
total = 0
for sample in tqdm(iter(dataset)):
total += 1
if any_keyword_in_string(sample["content"], filters):
for k, v in sample.items():
filtered_dict[k].append(v)
print(f"{len(filtered_dict['content'])/total:.2%} of data after filtering.")
return Dataset.from_dict(filtered_dict)
# Cette cellule prendra beaucoup de temps à s'exécuter, donc vous devriez la sauter et aller à la suivante !
from datasets import load_dataset
split = "train" # "valid"
filters = ["pandas", "sklearn", "matplotlib", "seaborn"]
data = load_dataset(f"transformersbook/codeparrot-{split}", split=split, streaming=True)
filtered_data = filter_streaming_dataset(data, filters)
from datasets import load_dataset, DatasetDict
ds_train = load_dataset("huggingface-course/codeparrot-ds-train", split="train")
ds_valid = load_dataset("huggingface-course/codeparrot-ds-valid", split="validation")
raw_datasets = DatasetDict(
{
"train": ds_train, # .shuffle().select(range(50000)),
"valid": ds_valid, # .shuffle().select(range(500))
}
)
raw_datasets
for key in raw_datasets["train"][0]:
print(f"{key.upper()}: {raw_datasets['train'][0][key][:200]}")
from transformers import AutoTokenizer
context_length = 128
tokenizer = AutoTokenizer.from_pretrained("huggingface-course/code-search-net-tokenizer")
outputs = tokenizer(
raw_datasets["train"][:2]["content"],
truncation=True,
max_length=context_length,
return_overflowing_tokens=True,
return_length=True,
)
print(f"Input IDs length: {len(outputs['input_ids'])}")
print(f"Input chunk lengths: {(outputs['length'])}")
print(f"Chunk mapping: {outputs['overflow_to_sample_mapping']}")
def tokenize(element):
outputs = tokenizer(
element["content"],
truncation=True,
max_length=context_length,
return_overflowing_tokens=True,
return_length=True,
)
input_batch = []
for length, input_ids in zip(outputs["length"], outputs["input_ids"]):
if length == context_length:
input_batch.append(input_ids)
return {"input_ids": input_batch}
tokenized_datasets = raw_datasets.map(
tokenize, batched=True, remove_columns=raw_datasets["train"].column_names
)
tokenized_datasets
from transformers import AutoTokenizer, GPT2LMHeadModel, AutoConfig
config = AutoConfig.from_pretrained(
"gpt2",
vocab_size=len(tokenizer),
n_ctx=context_length,
bos_token_id=tokenizer.bos_token_id,
eos_token_id=tokenizer.eos_token_id,
)
model = GPT2LMHeadModel(config)
model_size = sum(t.numel() for t in model.parameters())
print(f"GPT-2 size: {model_size/1000**2:.1f}M parameters")
from transformers import DataCollatorForLanguageModeling
tokenizer.pad_token = tokenizer.eos_token
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
out = data_collator([tokenized_datasets["train"][i] for i in range(5)])
for key in out:
print(f"{key} shape: {out[key].shape}")
from huggingface_hub import notebook_login
notebook_login()
from transformers import Trainer, TrainingArguments
args = TrainingArguments(
output_dir="codeparrot-ds",
per_device_train_batch_size=32,
per_device_eval_batch_size=32,
evaluation_strategy="steps",
eval_steps=5_000,
logging_steps=5_000,
gradient_accumulation_steps=8,
num_train_epochs=1,
weight_decay=0.1,
warmup_steps=1_000,
lr_scheduler_type="cosine",
learning_rate=5e-4,
save_steps=5_000,
fp16=True,
push_to_hub=True,
)
trainer = Trainer(
model=model,
tokenizer=tokenizer,
args=args,
data_collator=data_collator,
train_dataset=tokenized_datasets["train"],
eval_dataset=tokenized_datasets["valid"],
)
trainer.train()
trainer.push_to_hub()
import torch
from transformers import pipeline
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
pipe = pipeline(
"text-generation", model="huggingface-course/codeparrot-ds", device=device
)
txt = """\
# create some data
x = np.random.randn(100)
y = np.random.randn(100)
# create scatter plot with x, y
"""
print(pipe(txt, num_return_sequences=1)[0]["generated_text"])
txt = """\
# create some data
x = np.random.randn(100)
y = np.random.randn(100)
# create dataframe from x and y
"""
print(pipe(txt, num_return_sequences=1)[0]["generated_text"])
txt = """\
# dataframe with profession, income and name
df = pd.DataFrame({'profession': x, 'income':y, 'name': z})
# calculate the mean income per profession
"""
print(pipe(txt, num_return_sequences=1)[0]["generated_text"])
txt = """
# import random forest regressor from scikit-learn
from sklearn.ensemble import RandomForestRegressor
# fit random forest model with 300 estimators on X, y:
"""
print(pipe(txt, num_return_sequences=1)[0]["generated_text"])
keytoken_ids = []
for keyword in [
"plt",
"pd",
"sk",
"fit",
"predict",
" plt",
" pd",
" sk",
" fit",
" predict",
"testtest",
]:
ids = tokenizer([keyword]).input_ids[0]
if len(ids) == 1:
keytoken_ids.append(ids[0])
else:
print(f"Keyword has not single token: {keyword}")
from torch.nn import CrossEntropyLoss
import torch
def keytoken_weighted_loss(inputs, logits, keytoken_ids, alpha=1.0):
# Décalage pour que tokens < n prédise n
shift_labels = inputs[..., 1:].contiguous()
shift_logits = logits[..., :-1, :].contiguous()
# Calculer la perte par token
    loss_fct = CrossEntropyLoss(reduction="none")
loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
# Redimensionnement et perte moyenne par échantillon
loss_per_sample = loss.view(shift_logits.size(0), shift_logits.size(1)).mean(axis=1)
# Calculer et échelonner la pondération
weights = torch.stack([(inputs == kt).float() for kt in keytoken_ids]).sum(
axis=[0, 2]
)
weights = alpha * (1.0 + weights)
# Calculer la moyenne pondérée
weighted_loss = (loss_per_sample * weights).mean()
return weighted_loss
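# In short: each sample's mean token loss is re-weighted by
# alpha * (1 + number of keyword tokens it contains), so sequences that use the
# tracked libraries (plt, pd, sk, fit, predict, ...) contribute more to the
# gradient than generic Python code.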
from torch.utils.data.dataloader import DataLoader
tokenized_datasets.set_format("torch")
train_dataloader = DataLoader(tokenized_datasets["train"], batch_size=32, shuffle=True)
eval_dataloader = DataLoader(tokenized_datasets["valid"], batch_size=32)
weight_decay = 0.1
def get_grouped_params(model, no_decay=["bias", "LayerNorm.weight"]):
params_with_wd, params_without_wd = [], []
for n, p in model.named_parameters():
if any(nd in n for nd in no_decay):
params_without_wd.append(p)
else:
params_with_wd.append(p)
return [
{"params": params_with_wd, "weight_decay": weight_decay},
{"params": params_without_wd, "weight_decay": 0.0},
]
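# Biases and LayerNorm weights are conventionally excluded from weight decay;
# every other parameter is decayed with `weight_decay` (0.1 here).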
def evaluate():
model.eval()
losses = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
outputs = model(batch["input_ids"], labels=batch["input_ids"])
losses.append(accelerator.gather(outputs.loss))
loss = torch.mean(torch.cat(losses))
try:
perplexity = torch.exp(loss)
except OverflowError:
perplexity = float("inf")
return loss.item(), perplexity.item()
model = GPT2LMHeadModel(config)
from torch.optim import AdamW
optimizer = AdamW(get_grouped_params(model), lr=5e-4)
from accelerate import Accelerator
accelerator = Accelerator(fp16=True)
model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader
)
from transformers import get_scheduler
num_train_epochs = 1
num_update_steps_per_epoch = len(train_dataloader)
num_training_steps = num_train_epochs * num_update_steps_per_epoch
lr_scheduler = get_scheduler(
name="linear",
optimizer=optimizer,
num_warmup_steps=1_000,
num_training_steps=num_training_steps,
)
from huggingface_hub import Repository, get_full_repo_name
model_name = "codeparrot-ds-accelerate"
repo_name = get_full_repo_name(model_name)
repo_name
output_dir = "codeparrot-ds-accelerate"
repo = Repository(output_dir, clone_from=repo_name)
evaluate()
from tqdm.notebook import tqdm
gradient_accumulation_steps = 8
eval_steps = 5_000
model.train()
completed_steps = 0
for epoch in range(num_train_epochs):
for step, batch in tqdm(
enumerate(train_dataloader, start=1), total=num_training_steps
):
logits = model(batch["input_ids"]).logits
loss = keytoken_weighted_loss(batch["input_ids"], logits, keytoken_ids)
if step % 100 == 0:
accelerator.print(
{
"lr": get_lr(),
"samples": step * samples_per_step,
"steps": completed_steps,
"loss/train": loss.item() * gradient_accumulation_steps,
}
)
loss = loss / gradient_accumulation_steps
accelerator.backward(loss)
if step % gradient_accumulation_steps == 0:
accelerator.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
completed_steps += 1
if (step % (eval_steps * gradient_accumulation_steps)) == 0:
eval_loss, perplexity = evaluate()
accelerator.print({"loss/eval": eval_loss, "perplexity": perplexity})
model.train()
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(output_dir, save_function=accelerator.save)
if accelerator.is_main_process:
tokenizer.save_pretrained(output_dir)
repo.push_to_hub(
commit_message=f"Training in progress step {step}", blocking=False
)<jupyter_output><empty_output> | notebooks/course/fr/chapter7/section6_pt.ipynb/0 | {
"file_path": "notebooks/course/fr/chapter7/section6_pt.ipynb",
"repo_id": "notebooks",
"token_count": 4772
} | 311 |
<jupyter_start><jupyter_text>Dreambooth fine-tuning for Stable Diffusion using d🧨ffusers This notebook shows how to "teach" Stable Diffusion a new concept via Dreambooth using 🤗 Hugging Face [🧨 Diffusers library](https://github.com/huggingface/diffusers). ![Dreambooth Example](https://dreambooth.github.io/DreamBooth_files/teaser_static.jpg)_By using just 3-5 images you can teach new concepts to Stable Diffusion and personalize the model on your own images_ Differently from Textual Inversion, this approach trains the whole model, which can yield better results to the cost of bigger models.For a general introduction to the Stable Diffusion model please refer to this [colab](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). Initial setup<jupyter_code>#@title Install the required libs
!pip install -U -qq git+https://github.com/huggingface/diffusers.git
!pip install -qq accelerate tensorboard transformers ftfy gradio
!pip install -qq "ipywidgets>=7,<8"
!pip install -qq bitsandbytes
#@title [Optional] Install xformers for faster and memory efficient training
#@markdown Acknowledgement: The xformers wheel are taken from [TheLastBen/fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion). Thanks a lot for building these wheels!
%%time
!pip install -U --pre triton
from subprocess import getoutput
from IPython.display import HTML
from IPython.display import clear_output
import time
s = getoutput('nvidia-smi')
if 'T4' in s:
gpu = 'T4'
elif 'P100' in s:
gpu = 'P100'
elif 'V100' in s:
gpu = 'V100'
elif 'A100' in s:
gpu = 'A100'
while True:
  try:
    gpu == 'T4' or gpu == 'P100' or gpu == 'V100' or gpu == 'A100'
    break
  except:
    pass
  print('\033[1;31mIt seems that your GPU is not supported at the moment')
  time.sleep(5)
if (gpu=='T4'):
%pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/T4/xformers-0.0.13.dev0-py3-none-any.whl
elif (gpu=='P100'):
%pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/P100/xformers-0.0.13.dev0-py3-none-any.whl
elif (gpu=='V100'):
%pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/V100/xformers-0.0.13.dev0-py3-none-any.whl
elif (gpu=='A100'):
%pip install -q https://github.com/TheLastBen/fast-stable-diffusion/raw/main/precompiled/A100/xformers-0.0.13.dev0-py3-none-any.whl
#@title Import required libraries
import argparse
import itertools
import math
import os
from contextlib import nullcontext
import random
import numpy as np
import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch.utils.data import Dataset
import PIL
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import AutoencoderKL, DDPMScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel
from diffusers.optimization import get_scheduler
from diffusers.pipelines.stable_diffusion import StableDiffusionSafetyChecker
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer
import bitsandbytes as bnb
def image_grid(imgs, rows, cols):
assert len(imgs) == rows*cols
w, h = imgs[0].size
grid = Image.new('RGB', size=(cols*w, rows*h))
grid_w, grid_h = grid.size
for i, img in enumerate(imgs):
grid.paste(img, box=(i%cols*w, i//cols*h))
return grid<jupyter_output><empty_output><jupyter_text>Settings for teaching your new concept<jupyter_code>#@markdown `pretrained_model_name_or_path` which Stable Diffusion checkpoint you want to use
pretrained_model_name_or_path = "stabilityai/stable-diffusion-2" #@param ["stabilityai/stable-diffusion-2", "stabilityai/stable-diffusion-2-base", "CompVis/stable-diffusion-v1-4", "runwayml/stable-diffusion-v1-5"] {allow-input: true}
#@markdown Add here the URLs to the images of the concept you are adding. 3-5 should be fine
urls = [
"https://huggingface.co/datasets/valhalla/images/resolve/main/2.jpeg",
"https://huggingface.co/datasets/valhalla/images/resolve/main/3.jpeg",
"https://huggingface.co/datasets/valhalla/images/resolve/main/5.jpeg",
"https://huggingface.co/datasets/valhalla/images/resolve/main/6.jpeg",
## You can add additional images here
]
#@title Setup and check the images you have just added
import requests
import glob
from io import BytesIO
def download_image(url):
try:
response = requests.get(url)
except:
return None
return Image.open(BytesIO(response.content)).convert("RGB")
images = list(filter(None,[download_image(url) for url in urls]))
save_path = "./my_concept"
if not os.path.exists(save_path):
os.mkdir(save_path)
[image.save(f"{save_path}/{i}.jpeg") for i, image in enumerate(images)]
image_grid(images, 1, len(images))
#@title Settings for your newly created concept
#@markdown `instance_prompt` is a prompt that should contain a good description of what your object or style is, together with the initializer word `cat_toy`
instance_prompt = "<cat-toy> toy" #@param {type:"string"}
#@markdown Check the `prior_preservation` option if you would like the class of the concept (e.g.: toy, dog, painting) to be preserved. This increases quality and helps with generalization, at the cost of longer training time
prior_preservation = False #@param {type:"boolean"}
prior_preservation_class_prompt = "a photo of a cat clay toy" #@param {type:"string"}
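# When prior preservation is enabled, the training loop below optimises
# loss = MSE(instance prediction) + prior_loss_weight * MSE(class prediction),
# which is why a class prompt and a folder of class images are required.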
num_class_images = 12
sample_batch_size = 2
prior_loss_weight = 0.5
prior_preservation_class_folder = "./class_images"
class_data_root=prior_preservation_class_folder
class_prompt=prior_preservation_class_prompt<jupyter_output><empty_output><jupyter_text>Advanced settings for prior preservation (optional)<jupyter_code>num_class_images = 12 #@param {type: "number"}
sample_batch_size = 2
#@markdown `prior_loss_weight` determines how strong the prior-preservation (class) term should be
prior_loss_weight = 1 #@param {type: "number"}
#@markdown If the `prior_preservation_class_folder` is empty, images for the class will be generated with the class prompt. Otherwise, fill this folder with images of items on the same class as your concept (but not images of the concept itself)
prior_preservation_class_folder = "./class_images" #@param {type:"string"}
class_data_root=prior_preservation_class_folder<jupyter_output><empty_output><jupyter_text>Teach the model the new concept (fine-tuning with Dreambooth)Execute this this sequence of cells to run the training process. The whole process may take from 15 min to 2 hours. (Open this block if you are interested in how this process works under the hood or if you want to change advanced training settings or hyperparameters)<jupyter_code>#@title Setup the Classes
from pathlib import Path
from torchvision import transforms
class DreamBoothDataset(Dataset):
def __init__(
self,
instance_data_root,
instance_prompt,
tokenizer,
class_data_root=None,
class_prompt=None,
size=512,
center_crop=False,
):
self.size = size
self.center_crop = center_crop
self.tokenizer = tokenizer
self.instance_data_root = Path(instance_data_root)
if not self.instance_data_root.exists():
raise ValueError("Instance images root doesn't exists.")
self.instance_images_path = list(Path(instance_data_root).iterdir())
self.num_instance_images = len(self.instance_images_path)
self.instance_prompt = instance_prompt
self._length = self.num_instance_images
if class_data_root is not None:
self.class_data_root = Path(class_data_root)
self.class_data_root.mkdir(parents=True, exist_ok=True)
self.class_images_path = list(Path(class_data_root).iterdir())
self.num_class_images = len(self.class_images_path)
self._length = max(self.num_class_images, self.num_instance_images)
self.class_prompt = class_prompt
else:
self.class_data_root = None
self.image_transforms = transforms.Compose(
[
transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR),
transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __len__(self):
return self._length
def __getitem__(self, index):
example = {}
instance_image = Image.open(self.instance_images_path[index % self.num_instance_images])
if not instance_image.mode == "RGB":
instance_image = instance_image.convert("RGB")
example["instance_images"] = self.image_transforms(instance_image)
example["instance_prompt_ids"] = self.tokenizer(
self.instance_prompt,
padding="do_not_pad",
truncation=True,
max_length=self.tokenizer.model_max_length,
).input_ids
if self.class_data_root:
class_image = Image.open(self.class_images_path[index % self.num_class_images])
if not class_image.mode == "RGB":
class_image = class_image.convert("RGB")
example["class_images"] = self.image_transforms(class_image)
example["class_prompt_ids"] = self.tokenizer(
self.class_prompt,
padding="do_not_pad",
truncation=True,
max_length=self.tokenizer.model_max_length,
).input_ids
return example
class PromptDataset(Dataset):
def __init__(self, prompt, num_samples):
self.prompt = prompt
self.num_samples = num_samples
def __len__(self):
return self.num_samples
def __getitem__(self, index):
example = {}
example["prompt"] = self.prompt
example["index"] = index
return example
#@title Generate Class Images
import gc
if(prior_preservation):
class_images_dir = Path(class_data_root)
if not class_images_dir.exists():
class_images_dir.mkdir(parents=True)
cur_class_images = len(list(class_images_dir.iterdir()))
if cur_class_images < num_class_images:
pipeline = StableDiffusionPipeline.from_pretrained(
pretrained_model_name_or_path, revision="fp16", torch_dtype=torch.float16
).to("cuda")
pipeline.enable_attention_slicing()
pipeline.set_progress_bar_config(disable=True)
num_new_images = num_class_images - cur_class_images
print(f"Number of class images to sample: {num_new_images}.")
sample_dataset = PromptDataset(class_prompt, num_new_images)
sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=sample_batch_size)
for example in tqdm(sample_dataloader, desc="Generating class images"):
images = pipeline(example["prompt"]).images
for i, image in enumerate(images):
image.save(class_images_dir / f"{example['index'][i] + cur_class_images}.jpg")
pipeline = None
gc.collect()
del pipeline
with torch.no_grad():
torch.cuda.empty_cache()
#@title Load the Stable Diffusion model
# Load models and create wrapper for stable diffusion
text_encoder = CLIPTextModel.from_pretrained(
pretrained_model_name_or_path, subfolder="text_encoder"
)
vae = AutoencoderKL.from_pretrained(
pretrained_model_name_or_path, subfolder="vae"
)
unet = UNet2DConditionModel.from_pretrained(
pretrained_model_name_or_path, subfolder="unet"
)
tokenizer = CLIPTokenizer.from_pretrained(
pretrained_model_name_or_path,
subfolder="tokenizer",
)
#@title Setting up all training args
from argparse import Namespace
args = Namespace(
pretrained_model_name_or_path=pretrained_model_name_or_path,
resolution=vae.sample_size,
center_crop=True,
train_text_encoder=False,
instance_data_dir=save_path,
instance_prompt=instance_prompt,
learning_rate=5e-06,
max_train_steps=300,
save_steps=50,
train_batch_size=2, # set to 1 if using prior preservation
gradient_accumulation_steps=2,
max_grad_norm=1.0,
mixed_precision="fp16", # set to "fp16" for mixed-precision training.
gradient_checkpointing=True, # set this to True to lower the memory usage.
use_8bit_adam=True, # use 8bit optimizer from bitsandbytes
seed=3434554,
with_prior_preservation=prior_preservation,
prior_loss_weight=prior_loss_weight,
sample_batch_size=2,
class_data_dir=prior_preservation_class_folder,
class_prompt=prior_preservation_class_prompt,
num_class_images=num_class_images,
lr_scheduler="constant",
lr_warmup_steps=100,
output_dir="dreambooth-concept",
)
#@title Training function
from accelerate.utils import set_seed
def training_function(text_encoder, vae, unet):
logger = get_logger(__name__)
set_seed(args.seed)
accelerator = Accelerator(
gradient_accumulation_steps=args.gradient_accumulation_steps,
mixed_precision=args.mixed_precision,
)
# Currently, it's not possible to do gradient accumulation when training two models with accelerate.accumulate
# This will be enabled soon in accelerate. For now, we don't allow gradient accumulation when training two models.
# TODO (patil-suraj): Remove this check when gradient accumulation with two models is enabled in accelerate.
if args.train_text_encoder and args.gradient_accumulation_steps > 1 and accelerator.num_processes > 1:
raise ValueError(
"Gradient accumulation is not supported when training the text encoder in distributed training. "
"Please set gradient_accumulation_steps to 1. This feature will be supported in the future."
)
vae.requires_grad_(False)
if not args.train_text_encoder:
text_encoder.requires_grad_(False)
if args.gradient_checkpointing:
unet.enable_gradient_checkpointing()
if args.train_text_encoder:
text_encoder.gradient_checkpointing_enable()
# Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
if args.use_8bit_adam:
optimizer_class = bnb.optim.AdamW8bit
else:
optimizer_class = torch.optim.AdamW
params_to_optimize = (
itertools.chain(unet.parameters(), text_encoder.parameters()) if args.train_text_encoder else unet.parameters()
)
optimizer = optimizer_class(
params_to_optimize,
lr=args.learning_rate,
)
noise_scheduler = DDPMScheduler.from_config(args.pretrained_model_name_or_path, subfolder="scheduler")
train_dataset = DreamBoothDataset(
instance_data_root=args.instance_data_dir,
instance_prompt=args.instance_prompt,
class_data_root=args.class_data_dir if args.with_prior_preservation else None,
class_prompt=args.class_prompt,
tokenizer=tokenizer,
size=args.resolution,
center_crop=args.center_crop,
)
def collate_fn(examples):
input_ids = [example["instance_prompt_ids"] for example in examples]
pixel_values = [example["instance_images"] for example in examples]
# concat class and instance examples for prior preservation
if args.with_prior_preservation:
input_ids += [example["class_prompt_ids"] for example in examples]
pixel_values += [example["class_images"] for example in examples]
pixel_values = torch.stack(pixel_values)
pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
input_ids = tokenizer.pad(
{"input_ids": input_ids},
padding="max_length",
return_tensors="pt",
max_length=tokenizer.model_max_length
).input_ids
batch = {
"input_ids": input_ids,
"pixel_values": pixel_values,
}
return batch
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.train_batch_size, shuffle=True, collate_fn=collate_fn
)
lr_scheduler = get_scheduler(
args.lr_scheduler,
optimizer=optimizer,
num_warmup_steps=args.lr_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
if args.train_text_encoder:
unet, text_encoder, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, text_encoder, optimizer, train_dataloader, lr_scheduler
)
else:
unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
unet, optimizer, train_dataloader, lr_scheduler
)
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
weight_dtype = torch.bfloat16
# Move text_encode and vae to gpu.
# For mixed precision training we cast the text_encoder and vae weights to half-precision
# as these models are only used for inference, keeping weights in full precision is not required.
vae.to(accelerator.device, dtype=weight_dtype)
vae.decoder.to("cpu")
if not args.train_text_encoder:
text_encoder.to(accelerator.device, dtype=weight_dtype)
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Train!
total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
progress_bar.set_description("Steps")
global_step = 0
for epoch in range(num_train_epochs):
unet.train()
for step, batch in enumerate(train_dataloader):
with accelerator.accumulate(unet):
# Convert images to latent space
latents = vae.encode(batch["pixel_values"].to(dtype=weight_dtype)).latent_dist.sample()
latents = latents * 0.18215
# Sample noise that we'll add to the latents
noise = torch.randn_like(latents)
bsz = latents.shape[0]
# Sample a random timestep for each image
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (bsz,), device=latents.device)
timesteps = timesteps.long()
# Add noise to the latents according to the noise magnitude at each timestep
# (this is the forward diffusion process)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
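        # add_noise implements the closed-form forward process:
        # noisy_latents = sqrt(alpha_bar_t) * latents + sqrt(1 - alpha_bar_t) * noise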
# Get the text embedding for conditioning
encoder_hidden_states = text_encoder(batch["input_ids"])[0]
# Predict the noise residual
noise_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
# Get the target for loss depending on the prediction type
if noise_scheduler.config.prediction_type == "epsilon":
target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
if args.with_prior_preservation:
# Chunk the noise and noise_pred into two parts and compute the loss on each part separately.
noise_pred, noise_pred_prior = torch.chunk(noise_pred, 2, dim=0)
target, target_prior = torch.chunk(target, 2, dim=0)
# Compute instance loss
loss = F.mse_loss(noise_pred.float(), target.float(), reduction="none").mean([1, 2, 3]).mean()
# Compute prior loss
prior_loss = F.mse_loss(noise_pred_prior.float(), target_prior.float(), reduction="mean")
# Add the prior loss to the instance loss.
loss = loss + args.prior_loss_weight * prior_loss
else:
loss = F.mse_loss(noise_pred.float(), target.float(), reduction="mean")
accelerator.backward(loss)
if accelerator.sync_gradients:
params_to_clip = (
itertools.chain(unet.parameters(), text_encoder.parameters())
if args.train_text_encoder
else unet.parameters()
)
          accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
optimizer.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
global_step += 1
if global_step % args.save_steps == 0:
if accelerator.is_main_process:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
)
save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
pipeline.save_pretrained(save_path)
logs = {"loss": loss.detach().item()}
progress_bar.set_postfix(**logs)
if global_step >= args.max_train_steps:
break
accelerator.wait_for_everyone()
# Create the pipeline using using the trained modules and save it.
if accelerator.is_main_process:
pipeline = StableDiffusionPipeline.from_pretrained(
args.pretrained_model_name_or_path,
unet=accelerator.unwrap_model(unet),
text_encoder=accelerator.unwrap_model(text_encoder),
)
pipeline.save_pretrained(args.output_dir)
#@title Run training
import accelerate
accelerate.notebook_launcher(training_function, args=(text_encoder, vae, unet))
for param in itertools.chain(unet.parameters(), text_encoder.parameters()):
if param.grad is not None:
del param.grad # free some memory
torch.cuda.empty_cache()<jupyter_output>Launching training on one GPU.<jupyter_text>Run the code with your newly trained modelIf you have just trained your model with the code above, use the block below to run it.Also explore the [DreamBooth Concepts Library](https://huggingface.co/sd-dreambooth-library)<jupyter_code>#@title Save your newly created concept? You may save it privately to your personal profile or contribute it to the [library of concepts](https://huggingface.co/sd-dreambooth-library)?
#@markdown If you wish your model to be available for everyone, add it to the public library. If you prefer to use your model privately, add your own profile.
save_concept = True #@param {type:"boolean"}
#@markdown Once you save it you can use your concept by loading the model on any `from_pretrained` function
name_of_your_concept = "Cat toy" #@param {type:"string"}
where_to_save_concept = "public_library" #@param ["public_library", "privately_to_my_profile"]
#@markdown `hf_token_write`: leave blank if you logged in with a token with `write access` in the [Initial Setup](#scrollTo=KbzZ9xe6dWwf). If not, [go to your tokens settings and create a write access token](https://huggingface.co/settings/tokens)
hf_token_write = "" #@param {type:"string"}
if(save_concept):
from slugify import slugify
from huggingface_hub import HfApi, HfFolder, CommitOperationAdd
from huggingface_hub import create_repo
from IPython.display import display_markdown
api = HfApi()
your_username = api.whoami()["name"]
pipe = StableDiffusionPipeline.from_pretrained(
args.output_dir,
torch_dtype=torch.float16,
).to("cuda")
os.makedirs("fp16_model",exist_ok=True)
pipe.save_pretrained("fp16_model")
  # Resolve the Hub token first, since the organization-join request below needs it
  if(not hf_token_write):
    with open(HfFolder.path_token, 'r') as fin:
      hf_token = fin.read()
  else:
    hf_token = hf_token_write
  if(where_to_save_concept == "public_library"):
    repo_id = f"sd-dreambooth-library/{slugify(name_of_your_concept)}"
    #Join the Concepts Library organization if you aren't part of it already
    !curl -X POST -H 'Authorization: Bearer '$hf_token -H 'Content-Type: application/json' https://huggingface.co/organizations/sd-dreambooth-library/share/SSeOwppVCscfTEzFGQaqpfcjukVeNrKNHX
  else:
    repo_id = f"{your_username}/{slugify(name_of_your_concept)}"
  output_dir = args.output_dir
images_upload = os.listdir("my_concept")
image_string = ""
#repo_id = f"sd-dreambooth-library/{slugify(name_of_your_concept)}"
for i, image in enumerate(images_upload):
image_string = f'''{image_string}![image {i}](https://huggingface.co/{repo_id}/resolve/main/concept_images/{image})
'''
readme_text = f'''---
license: creativeml-openrail-m
tags:
- text-to-image
---
### {name_of_your_concept} on Stable Diffusion via Dreambooth
#### model by {api.whoami()["name"]}
This is the Stable Diffusion model fine-tuned on the {name_of_your_concept} concept, taught to Stable Diffusion with Dreambooth.
It can be used by modifying the `instance_prompt`: **{instance_prompt}**
You can also train your own concepts and upload them to the library by using [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb).
And you can run your new concept via `diffusers`: [Colab Notebook for Inference](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_inference.ipynb), [Spaces with the Public Concepts loaded](https://huggingface.co/spaces/sd-dreambooth-library/stable-diffusion-dreambooth-concepts)
Here are the images used for training this concept:
{image_string}
'''
#Save the readme to a file
readme_file = open("README.md", "w")
readme_file.write(readme_text)
readme_file.close()
#Save the token identifier to a file
text_file = open("token_identifier.txt", "w")
text_file.write(instance_prompt)
text_file.close()
operations = [
CommitOperationAdd(path_in_repo="token_identifier.txt", path_or_fileobj="token_identifier.txt"),
CommitOperationAdd(path_in_repo="README.md", path_or_fileobj="README.md"),
]
create_repo(repo_id,private=True, token=hf_token)
api.create_commit(
repo_id=repo_id,
operations=operations,
commit_message=f"Upload the concept {name_of_your_concept} embeds and token",
token=hf_token
)
api.upload_folder(
folder_path="fp16_model",
path_in_repo="",
repo_id=repo_id,
token=hf_token
)
api.upload_folder(
    folder_path="my_concept",
path_in_repo="concept_images",
repo_id=repo_id,
token=hf_token
)
display_markdown(f'''## Your concept was saved successfully. [Click here to access it](https://huggingface.co/{repo_id})
''', raw=True)
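# Illustrative aside: once the upload above finishes, the concept can be loaded from the Hub
# with the standard `from_pretrained` call. `repo_id` is the repository created above and the
# prompt is only an example, so treat this as a sketch rather than a required step:
# loaded_pipe = StableDiffusionPipeline.from_pretrained(repo_id, torch_dtype=torch.float16).to("cuda")
# loaded_pipe("a <cat-toy> themed lunchbox").images[0]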
#@title Set up the pipeline
from diffusers import DPMSolverMultistepScheduler
try:
pipe
except NameError:
pipe = StableDiffusionPipeline.from_pretrained(
args.output_dir,
scheduler = DPMSolverMultistepScheduler.from_pretrained(args.output_dir, subfolder="scheduler"),
torch_dtype=torch.float16,
).to("cuda")
#@title Run the Stable Diffusion pipeline with interactive UI Demo on Gradio
#@markdown Run this cell to get an interactive demo where you can run the model using Gradio
#@markdown ![](https://i.imgur.com/2ACLWu2.png)
import gradio as gr
def inference(prompt, num_samples):
all_images = []
images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=25).images
all_images.extend(images)
return all_images
with gr.Blocks() as demo:
with gr.Row():
with gr.Column():
prompt = gr.Textbox(label="prompt")
samples = gr.Slider(label="Samples",value=1)
run = gr.Button(value="Run")
with gr.Column():
gallery = gr.Gallery(show_label=False)
run.click(inference, inputs=[prompt,samples], outputs=gallery)
gr.Examples([["a photo of sks toy riding a bicycle", 1,1]], [prompt,samples], gallery, inference, cache_examples=False)
demo.launch()
#@title Run the Stable Diffusion pipeline on Colab
#@markdown Don't forget to use the placeholder token in your prompt
prompt = "a \u003Ccat-toy> in mad max fury road" #@param {type:"string"}
num_samples = 2 #@param {type:"number"}
num_rows = 1 #@param {type:"number"}
all_images = []
for _ in range(num_rows):
images = pipe(prompt, num_images_per_prompt=num_samples, num_inference_steps=25, guidance_scale=9).images
all_images.extend(images)
grid = image_grid(all_images, num_rows, num_samples)
grid<jupyter_output><empty_output> | notebooks/diffusers/sd_dreambooth_training.ipynb/0 | {
"file_path": "notebooks/diffusers/sd_dreambooth_training.ipynb",
"repo_id": "notebooks",
"token_count": 11937
} | 312 |
#!/bin/bash
#SBATCH --job-name=idefics_zero3_finetuning_multinode # name
#SBATCH --nodes=2 # nodes
#SBATCH --ntasks-per-node=1          # crucial - only 1 task per node; torch.distributed.run spawns one process per GPU
#SBATCH --cpus-per-task=96           # number of cores per task
#SBATCH --gres=gpu:8 # number of gpus
#SBATCH --output=%x-%j.out # output file name
export GPUS_PER_NODE=8
export MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
export MASTER_PORT=9901
source /fsx/m4/conda_installation/etc/profile.d/conda.sh
conda activate /fsx/m4/conda/hugo
srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
--nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
--master_addr $MASTER_ADDR --master_port $MASTER_PORT \
idefics_zero3_finetuning.py'
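# Usage sketch (assumes the conda environment and script paths above exist on your cluster):
#   sbatch slurm_script_idefics_zero3_finetuning_multinode.slurm
#   squeue -u $USER    # confirm that both nodes have been allocated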
| notebooks/examples/idefics/idefics_zero3_finetuning/slurm_script_idefics_zero3_finetuning_multinode.slurm/0 | {
"file_path": "notebooks/examples/idefics/idefics_zero3_finetuning/slurm_script_idefics_zero3_finetuning_multinode.slurm",
"repo_id": "notebooks",
"token_count": 389
} | 313 |
<jupyter_start><jupyter_text>Protein Folding with ESMFold and 🤗`transformers` ESMFold ([paper link](https://www.biorxiv.org/content/10.1101/2022.07.20.500902v2)) is a recently released protein folding model from FAIR. Unlike other protein folding models, it does not require external databases or search tools to predict structures, and is up to 60X faster as a result.The port to the HuggingFace `transformers` library is even easier to use, as we've removed the dependency on tools like `openfold` - once you `pip install transformers`, you're ready to use this model! Note that all the code that follows will be running the model **locally**, rather than calling an external API. This means that no rate limiting applies here - you can predict as many structures as your computer can handle. In testing, we found that ESMFold needs about 16-24GB of GPU memory to run well, depending on protein length. This may be too much for the smaller free GPUs on Colab. First step, make sure you're up to date - you'll need the most recent release of `transformers` and `accelerate`! If you want to visualize your predicted protein structure in the notebook, you should also install py3Dmol.<jupyter_code>! pip install --upgrade transformers py3Dmol accelerate<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("protein_folding_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Preparing your model and tokenizer Now we load our model and tokenizer. If using GPU, use `model.cuda()` to transfer the model to GPU.<jupyter_code>from transformers import AutoTokenizer, EsmForProteinFolding
tokenizer = AutoTokenizer.from_pretrained("facebook/esmfold_v1")
model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1", low_cpu_mem_usage=True)
model = model.cuda()<jupyter_output><empty_output><jupyter_text>Performance optimizations Since ESMFold is quite a large model, there are some considerations regarding memory usage and performance.Firstly, we can optionally convert the language model stem to float16 to improve performance and memory usage when running on a modern GPU. This was used during model training, and so should not make the outputs from the rest of the model invalid.<jupyter_code># Uncomment to switch the stem to float16
model.esm = model.esm.half()<jupyter_output><empty_output><jupyter_text>Secondly, you can enable TensorFloat32 computation for a general speedup if your hardware supports it. This line has no effect if your hardware doesn't support it.<jupyter_code>import torch
torch.backends.cuda.matmul.allow_tf32 = True<jupyter_output><empty_output><jupyter_text>Finally, we can reduce the 'chunk_size' used in the folding trunk. Smaller chunk sizes use less memory, but have slightly worse performance.<jupyter_code># Uncomment this line if your GPU memory is 16GB or less, or if you're folding longer (over 600 or so) sequences
model.trunk.set_chunk_size(64)<jupyter_output><empty_output><jupyter_text>Folding a single chain First, we tokenize our input. If you've used `transformers` before, proteins are processed like any other input string. Make sure **not** to add special tokens - ESM was trained with them, but ESMFold was trained without them.<jupyter_code># This is the sequence for human GNAT1, because I worked on it when
# I was a postdoc and so everyone else has to learn to appreciate it too.
# Feel free to substitute your own peptides of interest
# Depending on memory constraints you may wish to use shorter sequences.
test_protein = "MGAGASAEEKHSRELEKKLKEDAEKDARTVKLLLLGAGESGKSTIVKQMKIIHQDGYSLEECLEFIAIIYGNTLQSILAIVRAMTTLNIQYGDSARQDDARKLMHMADTIEEGTMPKEMSDIIQRLWKDSGIQACFERASEYQLNDSAGYYLSDLERLVTPGYVPTEQDVLRSRVKTTGIIETQFSFKDLNFRMFDVGGQRSERKKWIHCFEGVTCIIFIAALSAYDMVLVEDDEVNRMHESLHLFNSICNHRYFATTSIVLFLNKKDVFFEKIKKAHLSICFPDYDGPNTYEDAGNYIKVQFLELNMRRDVKEIYSHMTCATDTQNVKFVFDAVTDIIIKENLKDCGLF"
tokenized_input = tokenizer([test_protein], return_tensors="pt", add_special_tokens=False)['input_ids']<jupyter_output><empty_output><jupyter_text>If you're using a GPU, you'll need to move the tokenized data to the GPU now.<jupyter_code>tokenized_input = tokenized_input.cuda()<jupyter_output><empty_output><jupyter_text>With our preparations out of the way, getting your model outputs is as simple as...<jupyter_code>import torch
with torch.no_grad():
output = model(tokenized_input)<jupyter_output><empty_output><jupyter_text>Now here's the tricky bit - we convert the model outputs to a PDB file. This will likely be moved to a function in `transformers` in the future, but everything's still quite new, so it lives here for now! This code comes from the original ESMFold repo, and uses some functions from `openfold` that have been ported to `transformers`.<jupyter_code>from transformers.models.esm.openfold_utils.protein import to_pdb, Protein as OFProtein
from transformers.models.esm.openfold_utils.feats import atom14_to_atom37
def convert_outputs_to_pdb(outputs):
final_atom_positions = atom14_to_atom37(outputs["positions"][-1], outputs)
outputs = {k: v.to("cpu").numpy() for k, v in outputs.items()}
final_atom_positions = final_atom_positions.cpu().numpy()
final_atom_mask = outputs["atom37_atom_exists"]
pdbs = []
for i in range(outputs["aatype"].shape[0]):
aa = outputs["aatype"][i]
pred_pos = final_atom_positions[i]
mask = final_atom_mask[i]
resid = outputs["residue_index"][i] + 1
pred = OFProtein(
aatype=aa,
atom_positions=pred_pos,
atom_mask=mask,
residue_index=resid,
b_factors=outputs["plddt"][i],
chain_index=outputs["chain_index"][i] if "chain_index" in outputs else None,
)
pdbs.append(to_pdb(pred))
return pdbs
pdb = convert_outputs_to_pdb(output)<jupyter_output><empty_output><jupyter_text>Now we have our pdb - can we visualize it?<jupyter_code>import py3Dmol
view = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js', width=800, height=400)
view.addModel("".join(pdb), 'pdb')
view.setStyle({'model': -1}, {"cartoon": {'color': 'spectrum'}})<jupyter_output><empty_output><jupyter_text>Looks good! We can colour it differently, though - our model outputs a `plddt` field containing probabilities for each atom, indicating how confident it is in that part of the structure. In the conversion function above we added the `plddt` field in the `b_factors` argument, so it was included in our `pdb` string. Let's use it so that we can see high- and low-confidence areas of the structure visually!<jupyter_code># The plddt field is scaled from 0-1 on earlier versions of ESMFold but will be updated
# to match AlphaFold's scale of 0-100 in future versions.
# We check here so that this code will work on either:
if torch.max(output['plddt']) <= 1.0:
vmin = 0.5
vmax = 0.95
else:
vmin = 50
vmax = 95
view.setStyle({'cartoon': {'colorscheme': {'prop':'b','gradient': 'roygb','min': vmin,'max': vmax}}})<jupyter_output><empty_output><jupyter_text>Blue indicates high confidence, so that's a pretty high-quality prediction! Not too surprising considering GNAT1 was almost certainly in the training data, but nevertheless good to see. Finally, we can write our PDB string out to a file, which you can download and use in other tools.<jupyter_code>with open("output_structure.pdb", "w") as f:
f.write("".join(pdb))<jupyter_output><empty_output><jupyter_text>If you're running this in Colab (and haven't run out of memory by now!) then you can download the file we just created using the file browser interface at the left - the button looks like a little folder icon. Folding multiple chains Many proteins exist as complexes, either as multiple copies of the same peptide (a homopolymer), or a complex of different ones (a heteropolymer). To generate folds for such structures in ESMFold, we use a trick from the paper - we insert a "linker" of flexible glycine residues between each chain we want to fold simultaneously, and then we offset the position IDs for each chain from each other, so that the model treats them as being very distant portions of the same long chain. This works quite well, so let's see it in action! We'll use Glucosamine-6-phosphate deaminase (Uniprot: Q9CMF4) from the paper as an example. First, we define the sequence of the monomer, and the poly-G linker we want to use. Then we stick two copies of the monomer together with the linker in between.<jupyter_code>sequence = "MRLIPLHNVDQVAKWSARYIVDRINQFQPTEARPFVLGLPTGGTPLKTYEALIELYKAGEVSFKHVVTFNMDEYVGLPKEHPESYHSFMYKNFFDHVDIQEKNINILNGNTEDHDAECQRYEEKIKSYGKIHLFMGGVGVDGHIAFNEPASSLSSRTRIKTLTEDTLIANSRFFDNDVNKVPKYALTIGVGTLLDAEEVMILVTGYNKAQALQAAVEGSINHLWTVTALQMHRRAIIVCDEPATQELKVKTVKYFTELEASAIRSVK"
linker = 'G' * 25
homodimer_sequence = sequence + linker + sequence<jupyter_output><empty_output><jupyter_text>Now we tokenize the full homodimer sequence just like we did with the monomer sequence above.<jupyter_code>tokenized_homodimer = tokenizer([homodimer_sequence], return_tensors="pt", add_special_tokens=False)<jupyter_output><empty_output><jupyter_text>Now here's the tricky bit - we need to tweak the inputs a bit so the model doesn't think this is just a single peptide. The way we do that is by using the `position_ids` input to the model. The `position_ids` input tells the model the position of each amino acid in the input chain. By default, the model assumes that you've passed it one linear, contiguous chain - in other words, if you give it a peptide with 100 amino acids, it will assume the `position_ids` are just `[0, 1, ..., 98, 99]` unless you tell it otherwise.We want to make very clear that the two subunits aren't connected, though, so let's add a large offset to the position IDs of the second chain. The original repo uses 512, so let's stick with that.<jupyter_code>with torch.no_grad():
position_ids = torch.arange(len(homodimer_sequence), dtype=torch.long)
position_ids[len(sequence) + len(linker):] += 512
print(position_ids)<jupyter_output>tensor([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95,
96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107,
108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131,
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
144, 145, 146, 147, 148, 1[...]<jupyter_text>Now we're ready to predict! Let's add our `position_ids` to the tokenized inputs, but make sure to add a singleton batch dimension first to match the other arrays in there! Once that's done we can transfer that dict to the GPU and we're ready to get our folded structure.<jupyter_code>tokenized_homodimer['position_ids'] = position_ids.unsqueeze(0)
tokenized_homodimer = {key: tensor.cuda() for key, tensor in tokenized_homodimer.items()}<jupyter_output><empty_output><jupyter_text>Now we compute predictions just like before.<jupyter_code>with torch.no_grad():
output = model(**tokenized_homodimer)<jupyter_output><empty_output><jupyter_text>Next, we need to remove the poly-G linker from the output, so we can display the structure as fully independent chains. To do that, we'll alter the `atom37_atom_exists` field in the output. This field indicates, for display purposes, which atoms are present at each residue position. We will simply set all of the atoms for each of the linker residues to 0.<jupyter_code>linker_mask = torch.tensor([1] * len(sequence) + [0] * len(linker) + [1] * len(sequence))[None, :, None]
output['atom37_atom_exists'] = output['atom37_atom_exists'] * linker_mask.to(output['atom37_atom_exists'].device)<jupyter_output><empty_output><jupyter_text>With those output tweaks done, now we can convert the output to PDB and view it as before.<jupyter_code>pdb = convert_outputs_to_pdb(output)
view = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js', width=800, height=400)
view.addModel("".join(pdb), 'pdb')
# The plddt field is scaled from 0-1 on earlier versions of ESMFold but will be updated
# to match AlphaFold's scale of 0-100 in future versions.
# We check here so that this code will work on either:
if torch.max(output['plddt']) <= 1.0:
vmin = 0.5
vmax = 0.95
else:
vmin = 50
vmax = 95
view.setStyle({'cartoon': {'colorscheme': {'prop':'b','gradient': 'roygb','min': vmin,'max': vmax}}})<jupyter_output><empty_output><jupyter_text>And there's our dimer structure! As in the first example, we can now write this structure out to a PDB file and use it in downstream tools:<jupyter_code>with open("output_structure.pdb", "w") as f:
f.write("".join(pdb))<jupyter_output><empty_output><jupyter_text>**Tip**: If you're trying to predict a multimeric structure and you're getting low-quality outputs, try varying the order of the chains (if it's a heteropolymer) or the length of the linker. Bulk predictions Predicting single structures is nice, but the great advantage of running ESMFold locally is the fact that it's extremely fast while still producing highly accurate predictions. This makes it very suitable for proteomics work. Let's see that in action here - we're going to grab a set of monomeric proteins in E. Coli from Uniprot and fold all of them with high accuracy on a single GPU in a couple of minutes (depending on your GPU!)We do this because we can, and to upset any crystallographer friends we may have. First, you may need to install `requests`, `tqdm` and `pandas` if you don't have them already, to handle the data we grab from Uniprot.<jupyter_code># Uncomment and run this cell to install
#! pip install requests pandas tqdm<jupyter_output><empty_output><jupyter_text>Next, let's prepare the URL for our Uniprot query.<jupyter_code>import requests
uniprot_url = "https://rest.uniprot.org/uniprotkb/stream?compressed=true&fields=accession%2Csequence&format=tsv&query=%28%28taxonomy_id%3A83333%29%20AND%20%28reviewed%3Atrue%29%20AND%20%28length%3A%5B128%20TO%20512%5D%29%20AND%20%28cc_subunit%3Amonomer%29%29"<jupyter_output><empty_output><jupyter_text>This uniprot URL might seem mysterious, but it isn't! To get it, we searched for `(taxonomy_id:83333) AND (reviewed:true) AND (length:[128 TO 512]) AND (cc_subunit:monomer)` on UniProt to get all monomeric E.coli proteins of reasonable length, then selected 'Download', and set the format to TSV and the columns to `Sequence`.Once that's done, selecting `Generate URL for API` gives you a URL you can pass to Requests. Alternatively, if you're not on Colab you can just download the data through the web interface and open the file locally.<jupyter_code>uniprot_request = requests.get(uniprot_url)<jupyter_output><empty_output><jupyter_text>To get this data into Pandas, we use a `BytesIO` object, which Pandas will treat like a file. If you downloaded the data as a file you can skip this bit and just pass the filepath directly to `read_csv`.<jupyter_code>from io import BytesIO
import pandas
bio = BytesIO(uniprot_request.content)
df = pandas.read_csv(bio, compression='gzip', sep='\t')
df = df.dropna() # Drop rows with missing values, just in case
df<jupyter_output><empty_output><jupyter_text>If you have time, you could process this entire list, giving you folded structures for the entire monomeric proteome of E. Coli. For the sake of this demo, though, let's limit ourselves to 10:<jupyter_code>df = df.iloc[:10]<jupyter_output><empty_output><jupyter_text>Now let's pull out the sequences and batch-tokenize all of them.<jupyter_code>ecoli_tokenized = tokenizer(df.Sequence.tolist(), padding=False, add_special_tokens=False)['input_ids']<jupyter_output><empty_output><jupyter_text>Now we loop over our tokenized data, passing each sequence into our model:<jupyter_code>from tqdm import tqdm
outputs = []
with torch.no_grad():
for input_ids in tqdm(ecoli_tokenized):
input_ids = torch.tensor(input_ids, device='cuda').unsqueeze(0)
output = model(input_ids)
outputs.append({key: val.cpu() for key, val in output.items()})<jupyter_output>100%|███████████████████████████████████████████| 10/10 [02:04<00:00, 12.40s/it]<jupyter_text>Now we have 10 model outputs, which we can convert to PDB in bulk. If you get an error here, make sure you've run the cell above that defines the convert_outputs_to_pdb function!<jupyter_code>pdb_list = [convert_outputs_to_pdb(output) for output in outputs]<jupyter_output><empty_output><jupyter_text>Let's inspect one of them to see what we got.<jupyter_code>import py3Dmol
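# Optional aside, a sketch rather than a required step: before inspecting a single structure,
# a quick sanity check is to rank the batch by mean pLDDT confidence, for example:
# mean_plddt = [float(o["plddt"].mean()) for o in outputs]
# print(sorted(zip(df.Entry.tolist(), mean_plddt), key=lambda item: -item[1]))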
view = py3Dmol.view(js='https://3dmol.org/build/3Dmol.js', width=800, height=400)
view.addModel("".join(pdb_list[0]), 'pdb')
# The plddt field is scaled from 0-1 on earlier versions of ESMFold but will be updated
# to match AlphaFold's scale of 0-100 in future versions.
# We check here so that this code will work on either:
if torch.max(output['plddt']) <= 1.0:
vmin = 0.5
vmax = 0.95
else:
vmin = 50
vmax = 95
view.setStyle({'cartoon': {'colorscheme': {'prop':'b','gradient': 'roygb','min': vmin,'max': vmax}}})<jupyter_output><empty_output><jupyter_text>Looks good to me! Now we can save all of these to disc together.<jupyter_code>protein_identifiers = df.Entry.tolist()
for identifier, pdb in zip(protein_identifiers, pdb_list):
with open(f"{identifier}.pdb", "w") as f:
f.write("".join(pdb))<jupyter_output><empty_output> | notebooks/examples/protein_folding.ipynb/0 | {
"file_path": "notebooks/examples/protein_folding.ipynb",
"repo_id": "notebooks",
"token_count": 6321
} | 314 |
<jupyter_start><jupyter_text>How to fine-tune a distilbert model with ONNX RuntimeThis notebook is largely inspired by the text classification [notebook of Transformers](https://github.com/huggingface/notebooks/blob/main/examples/text_classification.ipynb), which uses PyTorch as the backend for fine-tuning. Here, instead of `Trainer`, you will use the `ORTTrainer` class from the [🏎️ Optimum](https://github.com/huggingface/optimum) library and take [ONNX Runtime](https://microsoft.github.io/onnxruntime/) as the backend to accelerate training. __Dependencies__To use ONNX Runtime for training, you need a machine with at least one NVIDIA GPU.__The ONNX Runtime training module needs to be properly installed before launching the notebook! Please follow the instructions in [Optimum's documentation](https://huggingface.co/docs/optimum/onnxruntime/trainer) to set up your environment.__Check your GPU:<jupyter_code>!nvidia-smi<jupyter_output>Fri Sep 16 09:45:13 2022 
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.33.01 Driver Version: 440.33.01 CUDA Version: 11.3 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla T4 On | 00000000:00:1E.0 Off | 0 |
| N/A 27C P8 8W / 70W | 0MiB / 15109MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
+-----------------------------------------------------------------------------+
| Processes: GPU Memory |
| GPU [...]<jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Optimum, 🤗 Transformers, 🤗 Datasets and 🤗 evaluate. Uncomment the following cell and run it.<jupyter_code># !pip install optimum transformers datasets evaluate<jupyter_output><empty_output><jupyter_text>__[Optional]__ If you want to share your model with the community and generate an inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/welcome) if you haven't already!) then execute the following cell and input your username and password:<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output>/usr/lib/python3/dist-packages/requests/__init__.py:89: RequestsDependencyWarning: urllib3 (1.26.12) or chardet (3.0.4) doesn't match a supported version!
warnings.warn("urllib3 ({}) or chardet ({}) doesn't match a supported "<jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code>!apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.15.0:<jupyter_code>import transformers
print(transformers.__version__)<jupyter_output>4.23.0.dev0<jupyter_text>In this notebook, you will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) model to a text classification task of the [GLUE Benchmark](https://gluebenchmark.com/).![Widget inference on a text classification task](images/text_classification.png)The GLUE Benchmark is a group of nine classification tasks on sentences or pairs of sentences which are:- [CoLA](https://nyu-mll.github.io/CoLA/) (Corpus of Linguistic Acceptability) Determine if a sentence is grammatically correct or not.is a dataset containing sentences labeled grammatically correct or not.- [MNLI](https://arxiv.org/abs/1704.05426) (Multi-Genre Natural Language Inference) Determine if a sentence entails, contradicts or is unrelated to a given hypothesis. (This dataset has two versions, one with the validation and test set coming from the same distribution, another called mismatched where the validation and test use out-of-domain data.)- [MRPC](https://www.microsoft.com/en-us/download/details.aspx?id=52398) (Microsoft Research Paraphrase Corpus) Determine if two sentences are paraphrases from one another or not.- [QNLI](https://rajpurkar.github.io/SQuAD-explorer/) (Question-answering Natural Language Inference) Determine if the answer to a question is in the second sentence or not. (This dataset is built from the SQuAD dataset.)- [QQP](https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs) (Quora Question Pairs2) Determine if two questions are semantically equivalent or not.- [RTE](https://aclweb.org/aclwiki/Recognizing_Textual_Entailment) (Recognizing Textual Entailment) Determine if a sentence entails a given hypothesis or not.- [SST-2](https://nlp.stanford.edu/sentiment/index.html) (Stanford Sentiment Treebank) Determine if the sentence has a positive or negative sentiment.- [STS-B](http://ixa2.si.ehu.es/stswiki/index.php/STSbenchmark) (Semantic Textual Similarity Benchmark) Determine the similarity of two sentences with a score from 1 to 5.- [WNLI](https://cs.nyu.edu/faculty/davise/papers/WinogradSchemas/WS.html) (Winograd Natural Language Inference) Determine if a sentence with an anonymous pronoun and a sentence with this pronoun replaced are entailed or not. (This dataset is built from the Winograd Schema Challenge dataset.)We will see how to easily load the dataset for each one of those tasks and use the `ORTTrainer` API to fine-tune a model on it. Each task is named by its acronym, with `mnli-mm` standing for the mismatched version of MNLI (so same training set as `mnli` but different validation and test sets):<jupyter_code>GLUE_TASKS = ["cola", "mnli", "mnli-mm", "mrpc", "qnli", "qqp", "rte", "sst2", "stsb", "wnli"]<jupyter_output><empty_output><jupyter_text>This notebook is built to run on any of the tasks in the list above, with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a classification head. Depending on you model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code>task = "cola"
model_checkpoint = "distilbert-base-uncased"
batch_size = 16<jupyter_output><empty_output><jupyter_text>We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry
send_example_telemetry("text_classification_notebook", framework="ort")<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the 🤗 [Datasets](https://github.com/huggingface/datasets) and 🤗 [Evaluate](https://github.com/huggingface/evaluate) libraries to download the data and get the metric we need to use for evaluation. This can be easily done with the functions `datasets.load_dataset` and `evaluate.load`.<jupyter_code>from datasets import load_dataset
import evaluate<jupyter_output><empty_output><jupyter_text>Apart from `mnli-mm` being a special code, we can directly pass our task name to those functions. `load_dataset` will cache the dataset to avoid downloading it again the next time you run this cell.<jupyter_code>actual_task = "mnli" if task == "mnli-mm" else task
dataset = load_dataset("glue", actual_task)
metric = evaluate.load("glue", actual_task)<jupyter_output><empty_output><jupyter_text>The `dataset` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`).<jupyter_code>dataset<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>dataset["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets
import random
import pandas as pd
from IPython.display import display, HTML
def show_random_elements(dataset, num_examples=10):
assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset."
picks = []
for _ in range(num_examples):
pick = random.randint(0, len(dataset)-1)
while pick in picks:
pick = random.randint(0, len(dataset)-1)
picks.append(pick)
df = pd.DataFrame(dataset[picks])
for column, typ in dataset.features.items():
if isinstance(typ, datasets.ClassLabel):
df[column] = df[column].transform(lambda i: typ.names[i])
display(HTML(df.to_html()))
show_random_elements(dataset["train"])<jupyter_output><empty_output><jupyter_text>The metric is an instance of [`evaluate.EvaluationModule`](https://huggingface.co/docs/evaluate/package_reference/main_classesevaluate.EvaluationModule):<jupyter_code>metric<jupyter_output><empty_output><jupyter_text>You can call its `compute` method with your predictions and labels directly and it will return a dictionary with the metric(s) value:<jupyter_code>import numpy as np
fake_preds = np.random.randint(0, 2, size=(64,))
fake_labels = np.random.randint(0, 2, size=(64,))
metric.compute(predictions=fake_preds, references=fake_labels)<jupyter_output><empty_output><jupyter_text>Note that `evaluate.load` has loaded the proper metric associated to your task, which is:- for CoLA: [Matthews Correlation Coefficient](https://en.wikipedia.org/wiki/Matthews_correlation_coefficient)- for MNLI (matched or mismatched): Accuracy- for MRPC: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for QNLI: Accuracy- for QQP: Accuracy and [F1 score](https://en.wikipedia.org/wiki/F1_score)- for RTE: Accuracy- for SST-2: Accuracy- for STS-B: [Pearson Correlation Coefficient](https://en.wikipedia.org/wiki/Pearson_correlation_coefficient) and [Spearman's_Rank_Correlation_Coefficient](https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient)- for WNLI: Accuracyso the metric object only computes the one(s) needed for your task. Preprocessing the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put it in a format the model expects, as well as generate the other inputs that model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, use_fast=True)<jupyter_output><empty_output><jupyter_text>We pass along `use_fast=True` to the call above to use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, but if you got an error with the previous call, remove that argument. You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!", "And this sentence goes with it.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later), you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.To preprocess our dataset, we will thus need the names of the columns containing the sentence(s). The following dictionary keeps track of the correspondence task to column names:<jupyter_code>task_to_keys = {
"cola": ("sentence", None),
"mnli": ("premise", "hypothesis"),
"mnli-mm": ("premise", "hypothesis"),
"mrpc": ("sentence1", "sentence2"),
"qnli": ("question", "sentence"),
"qqp": ("question1", "question2"),
"rte": ("sentence1", "sentence2"),
"sst2": ("sentence", None),
"stsb": ("sentence1", "sentence2"),
"wnli": ("sentence1", "sentence2"),
}<jupyter_output><empty_output><jupyter_text>We can double check it does work on our current dataset:<jupyter_code>sentence1_key, sentence2_key = task_to_keys[task]
if sentence2_key is None:
print(f"Sentence: {dataset['train'][0][sentence1_key]}")
else:
print(f"Sentence 1: {dataset['train'][0][sentence1_key]}")
print(f"Sentence 2: {dataset['train'][0][sentence2_key]}")<jupyter_output>Sentence: Our friends won't buy this analysis, let alone the next one we propose.<jupyter_text>We can them write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. This will ensure that an input longer that what the model selected can handle will be truncated to the maximum length accepted by the model.<jupyter_code>def preprocess_function(examples):
if sentence2_key is None:
return tokenizer(examples[sentence1_key], truncation=True)
return tokenizer(examples[sentence1_key], examples[sentence2_key], truncation=True)<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>preprocess_function(dataset['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>encoded_dataset = dataset.map(preprocess_function, batched=True)<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `AutoModelForSequenceClassification` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is always 2, except for STS-B which is a regression problem and MNLI where we have 3 labels):<jupyter_code>from transformers import AutoModelForSequenceClassification
from optimum.onnxruntime import ORTTrainer, ORTTrainingArguments
num_labels = 3 if task.startswith("mnli") else 1 if task=="stsb" else 2
model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=num_labels)<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForSequenceClassification: ['vocab_projector.bias', 'vocab_transform.bias', 'vocab_projector.weight', 'vocab_layer_norm.bias', 'vocab_layer_norm.weight', 'vocab_transform.weight']
- This IS expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing DistilBertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Some weights of DistilBertForSequenceClassification were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['pre_classifier.weight', 'pre_classifier.bias', 'classi[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `ORTTrainer`, we will need to define two more things. The most important is the [`ORTTrainingArguments`](https://huggingface.co/docs/optimum/onnxruntime/traineroptimum.onnxruntime.ORTTrainingArguments), which is a class that contains all the attributes to customize the training. You can also use `TrainingArguments` in Transformers, but `ORTTrainingArguments` enables more optimized features of ONNX Runtime. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>metric_name = "pearson" if task == "stsb" else "matthews_correlation" if task == "cola" else "accuracy"
model_name = model_checkpoint.split("/")[-1]
args = ORTTrainingArguments(
f"{model_name}-finetuned-{task}",
evaluation_strategy = "epoch",
save_strategy = "epoch",
learning_rate=2e-5,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
num_train_epochs=5,
weight_decay=0.01,
load_best_model_at_end=True,
metric_for_best_model=metric_name,
optim="adamw_ort_fused",
# push_to_hub=True,
)<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay. Since the best model might not be the one at the end of training, we ask the `ORTTrainer` to load the best model it saved (according to `metric_name`) at the end of training.The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"optimum/bert-finetuned-mrpc"`). The last thing to define for our `ORTTrainer` is how to compute the metrics from the predictions. We need to define a function for this, which will just use the `metric` we loaded earlier, the only preprocessing we have to do is to take the argmax of our predicted logits (our just squeeze the last axis in the case of STS-B):<jupyter_code>def compute_metrics(eval_pred):
predictions, labels = eval_pred
if task != "stsb":
predictions = np.argmax(predictions, axis=1)
else:
predictions = predictions[:, 0]
return metric.compute(predictions=predictions, references=labels)<jupyter_output><empty_output><jupyter_text>Then we just need to pass all of this along with our datasets to the `ORTTrainer`:<jupyter_code>validation_key = "validation_mismatched" if task == "mnli-mm" else "validation_matched" if task == "mnli" else "validation"
trainer = ORTTrainer(
model=model,
args=args,
train_dataset=encoded_dataset["train"],
eval_dataset=encoded_dataset[validation_key],
compute_metrics=compute_metrics,
tokenizer=tokenizer,
feature="sequence-classification",
)<jupyter_output><empty_output><jupyter_text>You might wonder why we pass along the `tokenizer` when we already preprocessed our data. This is because we will use it once last time to make all the samples we gather the same length by applying padding, which requires knowing the model's preferences regarding padding (to the left or right? with which token?). The `tokenizer` has a pad method that will do all of this right for us, and the `ORTTrainer` will use it. You can customize this part by defining and passing your own `data_collator` which will receive the samples like the dictionaries seen above and will need to return a dictionary of tensors. We can now finetune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output>The following columns in the training set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: sentence, idx. If sentence, idx are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.
You're using a DistilBertTokenizerFast tokenizer. Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.
/usr/local/lib/python3.8/dist-packages/onnxruntime/training/ortmodule/_training_manager.py:191: UserWarning: Fast path enabled - skipping checks. Rebuild graph: True, Execution agent: True, Device check: True
warnings.warn(
WARNING: The shape inference of org.pytorch.aten::ATen type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.
WARNING: The shape inference of org.pytorch.aten::ATen type is missing[...]<jupyter_text>Evaluating your model Evaluate the performance of the model that you just fine-tuned with the validation dataset that you've passed to `ORTTrainer` by just calling the `evaluate` method. If you set `inference_with_ort=True`, the inference will be done with ONNX Runtime backend. Otherwise, the inference will take PyTorch as backend.<jupyter_code>trainer.evaluate(inference_with_ort=True)<jupyter_output>The following columns in the evaluation set don't have a corresponding argument in `DistilBertForSequenceClassification.forward` and have been ignored: sentence, idx. If sentence, idx are not expected by `DistilBertForSequenceClassification.forward`, you can safely ignore this message.
Using framework PyTorch: 1.11.0+cu113
/usr/local/lib/python3.8/dist-packages/transformers/models/distilbert/modeling_distilbert.py:213: TracerWarning: torch.tensor results are registered as constants in the trace. You can safely ignore this warning if you use this function to create tensors out of constant variables that would be the same every time you call this function. In any other case, this might cause the trace to be incorrect.
mask, torch.tensor(torch.finfo(scores.dtype).min)
WARNING: The shape inference of org.pytorch.aten::ATen type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function.
WARNING: The shape inference of org[...] | notebooks/examples/text_classification_ort.ipynb/0 | {
"file_path": "notebooks/examples/text_classification_ort.ipynb",
"repo_id": "notebooks",
"token_count": 7176
} | 315 |
<jupyter_start><jupyter_text>Speed Comparison `Safetensors` is really fast. Let's compare it against `PyTorch` by loading [gpt2](https://huggingface.co/gpt2) weights. To run the [GPU benchmark](gpu-benchmark), make sure your machine has a GPU or you have selected `GPU runtime` if you are using Google Colab.Before you begin, make sure you have all the necessary libraries installed:<jupyter_code>!pip install safetensors huggingface_hub torch<jupyter_output><empty_output><jupyter_text>Let's start by importing all the packages that will be used:<jupyter_code>import os
import datetime
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file
import torch<jupyter_output><empty_output><jupyter_text>Download safetensors & torch weights for gpt2:<jupyter_code>sf_filename = hf_hub_download("gpt2", filename="model.safetensors")
pt_filename = hf_hub_download("gpt2", filename="pytorch_model.bin")<jupyter_output><empty_output><jupyter_text>CPU benchmark<jupyter_code>start_st = datetime.datetime.now()
weights = load_file(sf_filename, device="cpu")
load_time_st = datetime.datetime.now() - start_st
print(f"Loaded safetensors {load_time_st}")
start_pt = datetime.datetime.now()
weights = torch.load(pt_filename, map_location="cpu")
load_time_pt = datetime.datetime.now() - start_pt
print(f"Loaded pytorch {load_time_pt}")
print(f"on CPU, safetensors is faster than pytorch by: {load_time_pt/load_time_st:.1f} X")<jupyter_output><empty_output><jupyter_text>This speedup is due to the fact that this library avoids unnecessary copies by mapping the file directly. It is actually possible to do on [pure pytorch](https://gist.github.com/Narsil/3edeec2669a5e94e4707aa0f901d2282).The currently shown speedup was gotten on:* OS: Ubuntu 18.04.6 LTS* CPU: Intel(R) Xeon(R) CPU @ 2.00GHz GPU benchmark<jupyter_code># This is required because this feature hasn't been fully verified yet, but
# it's been tested on many different environments
os.environ["SAFETENSORS_FAST_GPU"] = "1"
# CUDA startup out of the measurement
torch.zeros((2, 2)).cuda()
start_st = datetime.datetime.now()
weights = load_file(sf_filename, device="cuda:0")
load_time_st = datetime.datetime.now() - start_st
print(f"Loaded safetensors {load_time_st}")
start_pt = datetime.datetime.now()
weights = torch.load(pt_filename, map_location="cuda:0")
load_time_pt = datetime.datetime.now() - start_pt
print(f"Loaded pytorch {load_time_pt}")
print(f"on GPU, safetensors is faster than pytorch by: {load_time_pt/load_time_st:.1f} X")<jupyter_output><empty_output> | notebooks/safetensors_doc/en/speed.ipynb/0 | {
"file_path": "notebooks/safetensors_doc/en/speed.ipynb",
"repo_id": "notebooks",
"token_count": 893
} | 316 |
<jupyter_start><jupyter_text>Huggingface Sagemaker - Vision Transformer Image Classification with the `google/vit` on `cifar10` 1. [Introduction](Introduction) 2. [Development Environment and Permissions](Development-Environment-and-Permissions) 1. [Installation](Installation) 3. [Permissions](Permissions)3. [Processing](Preprocessing) 1. [convert features and transform images](convert-features-and-transform-images) 2. [Uploading data to sagemaker_session_bucket](Uploading-data-to-sagemaker_session_bucket) 4. [Fine-tuning & starting Sagemaker Training Job](Fine-tuning-\&-starting-Sagemaker-Training-Job) 1. [Creating an Estimator and start a training job](Creating-an-Estimator-and-start-a-training-job) IntroductionWelcome to our end-to-end image classification example. In this demo, we will use the Hugging Face `transformers` and `datasets` libraries together with Amazon SageMaker to fine-tune a pre-trained Vision Transformer on image classification. The script and notebook are inspired by [NielsRogge's](https://github.com/NielsRogge) example notebook of [Fine-tune the Vision Transformer on CIFAR-10](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/VisionTransformer/Fine_tuning_the_Vision_Transformer_on_CIFAR_10_with_the_%F0%9F%A4%97_Trainer.ipynb). Niels also contributed the Vision Transformer to `transformers`._**NOTE: You can run this demo in Sagemaker Studio, your local machine or Sagemaker Notebook Instances**_ ![Bildschirmfoto%202021-06-09%20um%2010.08.22.png](attachment:Bildschirmfoto%202021-06-09%20um%2010.08.22.png) Development Environment and Permissions _**Use at least a `t3.large` instance, otherwise preprocessing will take ages.**_ Installation_*Note:* we only install the required libraries from Hugging Face and AWS. You also need PyTorch or Tensorflow, if not already installed_<jupyter_code>!pip install "sagemaker>=2.140.0" "transformers==4.26.1" "datasets[s3]==2.10.1" --upgrade<jupyter_output><empty_output><jupyter_text>Permissions _If you are going to use Sagemaker in a local environment, you need access to an IAM Role with the required permissions for Sagemaker. You can find out more about this [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html)_<jupyter_code>import sagemaker
import boto3
sess = sagemaker.Session()
# sagemaker session bucket -> used for uploading data, models and logs
# sagemaker will automatically create this bucket if it not exists
sagemaker_session_bucket=None
if sagemaker_session_bucket is None and sess is not None:
# set to default bucket if a bucket name is not given
sagemaker_session_bucket = sess.default_bucket()
try:
role = sagemaker.get_execution_role()
except ValueError:
iam = boto3.client('iam')
role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn']
sess = sagemaker.Session(default_bucket=sagemaker_session_bucket)
print(f"sagemaker role arn: {role}")
print(f"sagemaker bucket: {sess.default_bucket()}")
print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output><empty_output><jupyter_text>PreprocessingWe are using the `datasets` library to download and preprocess the `fashion-mnist` dataset. After preprocessing, the dataset will be uploaded to our `sagemaker_session_bucket` to be used within our training job. The [cifar10](https://www.cs.toronto.edu/~kriz/cifar.html) are labeled subsets of the 80 million tiny images dataset. They were collected by Alex Krizhevsky, Vinod Nair, and Geoffrey Hinton. _Note from Nils: "that in the ViT paper, the best results were obtained when fine-tuning at a higher resolution. For this, one interpolates the pre-trained absolute position embeddings"._ Convert Features and transform images<jupyter_code>from transformers import AutoProcessor
from datasets import load_dataset
import numpy as np
from PIL import Image
from random import randint
# dataset used
dataset_name = 'cifar10'
# s3 key prefix for the data
s3_prefix = 'samples/datasets/cifar10'
# FeatureExtractor used in preprocessing
model_name = 'google/vit-base-patch16-224-in21k'
image_processor = AutoProcessor.from_pretrained(model_name)<jupyter_output><empty_output><jupyter_text>We are downsampling the dataset to make it faster to preprocess.<jupyter_code># load dataset
train_dataset, test_dataset = load_dataset(dataset_name,
split=['train[:5000]', 'test[:2000]'])
# display random sample
train_dataset[0]["img"]
from datasets import Features, Array3D
# we need to extend the features
features = Features({
**train_dataset.features,
'pixel_values': Array3D(dtype="float32", shape=(3, 224, 224)),
})
# extractor helper function
def preprocess_images(examples):
# get batch of images
images = examples['img']
inputs = image_processor(images=images)
examples['pixel_values'] = inputs['pixel_values']
return examples
# preprocess dataset
train_dataset = train_dataset.map(preprocess_images, batched=True,features=features)
test_dataset = test_dataset.map(preprocess_images, batched=True,features=features)
# set to torch format for training
train_dataset.set_format('torch', columns=['pixel_values', 'label'])
test_dataset.set_format('torch', columns=['pixel_values', 'label'])
# remove unused column
train_dataset = train_dataset.remove_columns("img")<jupyter_output><empty_output><jupyter_text>Uploading data to `sagemaker_session_bucket`After we processed the `datasets`, we are going to use the new `FileSystem` [integration](https://huggingface.co/docs/datasets/filesystems.html) to upload our dataset to S3.<jupyter_code>import botocore
from datasets.filesystems import S3FileSystem
# save train_dataset to s3
training_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/train'
train_dataset.save_to_disk(training_input_path, num_shards=1)
# save test_dataset to s3
test_input_path = f's3://{sess.default_bucket()}/{s3_prefix}/test'
test_dataset.save_to_disk(test_input_path, num_shards=1)
print(f"train dataset is uploaded to {training_input_path}")
print(f"test dataset is uploaded to {test_input_path}")<jupyter_output><empty_output><jupyter_text>num_train_epochs Fine-tuning & starting Sagemaker Training JobIn order to create a sagemaker training job we need a `HuggingFace` Estimator. The Estimator handles end-to-end Amazon SageMaker training and deployment tasks. In an Estimator, we define which fine-tuning script should be used as `entry_point`, which `instance_type` should be used, which `hyperparameters` are passed in .....```python/opt/conda/bin/python train.py --num_train_epochs 1 --model_name google/vit-base-patch16-224-in21k --per_device_train_batch_size 16``` Creating an Estimator and start a training jobWe are defining the hyperparameter `use_auth_token` with our token from huggingface.co/settings to automatically upload our model to the Hugging Face Model Hub afterwards. The `train.py` makes us of the `.push_to_hub()` of the Trainer API to automatically upload model to hf.co/models.<jupyter_code>from sagemaker.huggingface import HuggingFace
# hyperparameters, which are passed into the training job
hyperparameters={'num_train_epochs': 3, # train epochs
'per_device_train_batch_size': 16, # batch size
'model_name': model_name, # model which will be trained on
                 'use_auth_token': '' # add your API token to upload the model
}
huggingface_estimator = HuggingFace(entry_point='train.py',
source_dir='./scripts',
instance_type='ml.p3.2xlarge',
instance_count=1,
role=role,
transformers_version='4.26',
pytorch_version='1.13',
py_version='py39',
hyperparameters = hyperparameters)
# starting the train job with our uploaded datasets as input
huggingface_estimator.fit({'train': training_input_path, 'test': test_input_path})<jupyter_output><empty_output><jupyter_text>Upload to hubSince we have done the preprocessing in advance we need to upload the `image_processor` separately. You can do this by creating a `preprocessor_config.json` file in the UI on huggingface.co or using the `huggingface_hub` python library. ![Bildschirmfoto%202021-06-09%20um%2010.02.52.png](attachment:Bildschirmfoto%202021-06-09%20um%2010.02.52.png)The file needs to contain the configuration of the `image_processor`.<jupyter_code>print(image_processor.to_json_string())<jupyter_output><empty_output> | notebooks/sagemaker/09_image_classification_vision_transformer/sagemaker-notebook.ipynb/0 | {
"file_path": "notebooks/sagemaker/09_image_classification_vision_transformer/sagemaker-notebook.ipynb",
"repo_id": "notebooks",
"token_count": 2996
} | 317 |
# Optimum Graphcore
graphcore_index: graphcore/index
graphcore_quickstart: graphcore/quickstart
graphcore_ipu_config: graphcore/ipu_config
graphcore_trainer: graphcore/trainer
graphcore_add_support_for_new_model: graphcore/add_support_for_new_model
# Optimum Habana
habana_index: habana/index
habana_quickstart: habana/quickstart
habana_single_hpu: habana/tutorials/single_hpu
habana_distributed: habana/tutorials/distributed
habana_deepspeed: habana/usage_guides/deepspeed
habana_accelerate_training: habana/usage_guides/accelerate_training
habana_trainer: habana/package_reference/trainer
habana_gaudi_config: habana/package_reference/gaudi_config
habana/usage_guides/stable_diffusion: habana/tutorials/stable_diffusion
habana/tutorials/pretraining: habana/usage_guides/pretraining
# Optimum Intel
intel_index: intel/index
intel_quickstart: intel/optimization_inc
intel_configuration: intel/reference_inc
intel_optimization: intel/optimization_inc
intel_quantization: intel/optimization_inc
intel_pruning: intel/optimization_inc
intel_trainer: intel/reference_inc
# Optimum Neuron
docs/optimum-neuron/index: /docs/optimum-neuron/index
# Optimum TPU
docs/optimum-tpu/index: /docs/optimum-tpu/index
tpu/index: /docs/optimum-tpu/index
| optimum/docs/source/_redirects.yml/0 | {
"file_path": "optimum/docs/source/_redirects.yml",
"repo_id": "optimum",
"token_count": 435
} | 318 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->
# Adding support for an unsupported architecture
If you wish to export a model whose architecture is not already supported by the library, the PR [#813 Adds support for ResNet](https://github.com/huggingface/optimum/pull/813) can be used as a reference.
You can make sure tests pass for the new `my_new_modeltype` model type by running:
```bash
pytest tests/exporters/tflite/test_*.py -k "my_new_modeltype" -s --exitfirst
```
| optimum/docs/source/exporters/tflite/usage_guides/contribute.mdx/0 | {
"file_path": "optimum/docs/source/exporters/tflite/usage_guides/contribute.mdx",
"repo_id": "optimum",
"token_count": 272
} | 319 |
# Accelerated inference on NVIDIA GPUs
By default, ONNX Runtime runs inference on CPU devices. However, it is possible to place supported operations on an NVIDIA GPU, while leaving any unsupported ones on CPU. In most cases, this allows costly operations to be placed on GPU and significantly accelerate inference.
This guide will show you how to run inference on two execution providers that ONNX Runtime supports for NVIDIA GPUs:
* `CUDAExecutionProvider`: Generic acceleration on NVIDIA CUDA-enabled GPUs.
* `TensorrtExecutionProvider`: Uses NVIDIA’s [TensorRT](https://developer.nvidia.com/tensorrt) inference engine and generally provides the best runtime performance.
<Tip warning={true}>
Due to a limitation of ONNX Runtime, it is not possible to run quantized models on `CUDAExecutionProvider` and only models with static quantization can be run on `TensorrtExecutionProvider`.
</Tip>
## CUDAExecutionProvider
### CUDA installation
Provided the CUDA and cuDNN [requirements](https://onnxruntime.ai/docs/execution-providers/CUDA-ExecutionProvider.html#requirements) are satisfied, install the additional dependencies by running
```bash
pip install optimum[onnxruntime-gpu]
```
To avoid conflicts between `onnxruntime` and `onnxruntime-gpu`, make sure the package `onnxruntime` is not installed by running `pip uninstall onnxruntime` prior to installing Optimum.
### Checking the CUDA installation is successful
Before going further, run the following sample code to check whether the install was successful:
```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> from transformers import AutoTokenizer
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "philschmid/tiny-bert-sst2-distilled",
... export=True,
... provider="CUDAExecutionProvider",
... )
>>> tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled")
>>> inputs = tokenizer("expectations were low, actual enjoyment was high", return_tensors="pt", padding=True)
>>> outputs = ort_model(**inputs)
>>> assert ort_model.providers == ["CUDAExecutionProvider", "CPUExecutionProvider"]
```
In case this code runs gracefully, congratulations, the installation is successful! If you encounter the following error or similar,
```
ValueError: Asked to use CUDAExecutionProvider as an ONNX Runtime execution provider, but the available execution providers are ['CPUExecutionProvider'].
```
then something is wrong with the CUDA or ONNX Runtime installation.
### Use CUDA execution provider with floating-point models
For non-quantized models, the use is straightforward. Simply specify the `provider` argument in the `ORTModel.from_pretrained()` method. Here's an example:
```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "distilbert-base-uncased-finetuned-sst-2-english",
... export=True,
... provider="CUDAExecutionProvider",
... )
```
The model can then be used with the common 🤗 Transformers API for inference and evaluation, such as [pipelines](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/pipelines).
When using Transformers pipeline, note that the `device` argument should be set to perform pre- and post-processing on GPU, following the example below:
```python
>>> from optimum.pipelines import pipeline
>>> from transformers import AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
>>> pipe = pipeline(task="text-classification", model=ort_model, tokenizer=tokenizer, device="cuda:0")
>>> result = pipe("Both the music and visual were astounding, not to mention the actors performance.")
>>> print(result) # doctest: +IGNORE_RESULT
# printing: [{'label': 'POSITIVE', 'score': 0.9997727274894714}]
```
Additionally, you can pass the session option `log_severity_level = 0` (verbose), to check whether all nodes are indeed placed on the CUDA execution provider or not:
```python
>>> import onnxruntime
>>> session_options = onnxruntime.SessionOptions()
>>> session_options.log_severity_level = 0
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "distilbert-base-uncased-finetuned-sst-2-english",
... export=True,
... provider="CUDAExecutionProvider",
... session_options=session_options
... )
```
You should see the following logs:
```
2022-10-18 14:59:13.728886041 [V:onnxruntime:, session_state.cc:1193 VerifyEachN
odeIsAssignedToAnEp] Provider: [CPUExecutionProvider]: [Gather (Gather_76), Uns
queeze (Unsqueeze_78), Gather (Gather_97), Gather (Gather_100), Concat (Concat_1
10), Unsqueeze (Unsqueeze_125), ...]
2022-10-18 14:59:13.728906431 [V:onnxruntime:, session_state.cc:1193 VerifyEachN
odeIsAssignedToAnEp] Provider: [CUDAExecutionProvider]: [Shape (Shape_74), Slic
e (Slice_80), Gather (Gather_81), Gather (Gather_82), Add (Add_83), Shape (Shape
_95), MatMul (MatMul_101), ...]
```
In this example, we can see that all the costly MatMul operations are placed on the CUDA execution provider.
### Use CUDA execution provider with quantized models
Due to current limitations in ONNX Runtime, it is not possible to use quantized models with `CUDAExecutionProvider`. The reasons are as follows:
* When using [🤗 Optimum dynamic quantization](quantization#dynamic-quantization-example), nodes such as [`MatMulInteger`](https://github.com/onnx/onnx/blob/v1.12.0/docs/Operators.md#MatMulInteger), [`DynamicQuantizeLinear`](https://github.com/onnx/onnx/blob/v1.12.0/docs/Operators.md#DynamicQuantizeLinear) may be inserted in the ONNX graph, which cannot be consumed by the CUDA execution provider.
* When using [static quantization](quantization#static-quantization-example), the ONNX computation graph will contain matrix multiplications and convolutions in floating-point arithmetic, along with Quantize + Dequantize operations to simulate quantization. In this case, although the costly matrix multiplications and convolutions will be run on the GPU, they will use floating-point arithmetic as the `CUDAExecutionProvider` cannot consume the Quantize + Dequantize nodes to replace them with operations using integer arithmetic.
### Reduce memory footprint with IOBinding
[IOBinding](https://onnxruntime.ai/docs/api/python/api_summary.html#iobinding) is an efficient way to avoid expensive data copying when using GPUs. By default, ONNX Runtime will copy the input from the CPU (even if the tensors are already copied to the targeted device), and assume that outputs also need to be copied back to the CPU from GPUs after the run. These data copying overheads between the host and devices are expensive, and __can lead to worse inference latency than vanilla PyTorch__ especially for the decoding process.
To avoid this slowdown, 🤗 Optimum adopts IOBinding to copy inputs onto GPUs and pre-allocate memory for outputs prior to inference. When instantiating the `ORTModel`, set the argument `use_io_binding` to choose whether to turn on IOBinding during inference. `use_io_binding` is set to `True` by default if you choose CUDA as the execution provider.
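For illustration, here is a minimal sketch of enabling it explicitly together with the CUDA execution provider (mirroring the snippet below; the checkpoint is just an example):

```python
>>> from transformers import AutoTokenizer, pipeline
>>> from optimum.onnxruntime import ORTModelForSeq2SeqLM

# IOBinding is already the default with CUDAExecutionProvider; it is passed explicitly here
>>> model = ORTModelForSeq2SeqLM.from_pretrained(
...     "t5-small",
...     export=True,
...     provider="CUDAExecutionProvider",
...     use_io_binding=True,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")

# Create a pipeline whose pre- and post-processing also run on the GPU
>>> onnx_translation = pipeline("translation_en_to_fr", model=model, tokenizer=tokenizer, device="cuda:0")
```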
And if you want to turn off IOBinding:
```python
>>> from transformers import AutoTokenizer, pipeline
>>> from optimum.onnxruntime import ORTModelForSeq2SeqLM
# Load the model from the hub and export it to the ONNX format
>>> model = ORTModelForSeq2SeqLM.from_pretrained("t5-small", export=True, use_io_binding=False)
>>> tokenizer = AutoTokenizer.from_pretrained("t5-small")
# Create a pipeline
>>> onnx_translation = pipeline("translation_en_to_fr", model=model, tokenizer=tokenizer, device="cuda:0")
```
For the time being, IOBinding is supported for task-defined ORT models. If you want us to add support for custom models, please file an issue on the Optimum repository.
### Observed time gains
We tested three common models with a decoding process: `GPT2` / `T5-small` / `M2M100-418M`, and the benchmark was run on a versatile Tesla T4 GPU (more environment details at the end of this section).
Here are some performance results running with `CUDAExecutionProvider` when IOBinding has been turned on. We have tested input sequence lengths from 8 to 512, and generated outputs both with greedy search and beam search (`num_beams=5`):
<table><tr>
<td>
<p align="center">
<img alt="GPT2" src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/t4_res_ort_gpt2.png" width="450">
<br>
<em style="color: grey">GPT2</em>
</p>
</td>
<td>
<p align="center">
<img alt="T5-small" src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/t4_res_ort_t5_s.png" width="450">
<br>
<em style="color: grey">T5-small</em>
</p>
</td></tr>
<tr><td>
<p align="center">
<img alt="M2M100-418M" src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/t4_res_ort_m2m100_418m.png" width="450">
<br>
<em style="color: grey">M2M100-418M</em>
</p>
</td>
</tr></table>
And here is a summary of the time savings with different sequence lengths (32 / 128) and generation modes (greedy search / beam search) when using ONNX Runtime compared with PyTorch:
<table><tr>
<td>
<p align="center">
<img alt="seq32" src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/inference_models_32.png" width="800">
<br>
<em style="color: grey">sequence length: 32</em>
</p>
</td></tr>
<tr><td>
<p align="center">
<img alt="seq128" src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/inference_models_128.png" width="800">
<br>
<em style="color: grey">sequence length: 128</em>
</p>
</td>
</tr></table>
Environment:
```
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.33.01 Driver Version: 440.33.01 CUDA Version: 11.3 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
|===============================+======================+======================|
| 0 Tesla T4 On | 00000000:00:1E.0 Off | 0 |
| N/A 28C P8 8W / 70W | 0MiB / 15109MiB | 0% Default |
+-------------------------------+----------------------+----------------------+
- Platform: Linux-5.4.0-1089-aws-x86_64-with-glibc2.29
- Python version: 3.8.10
- `transformers` version: 4.24.0
- `optimum` version: 1.5.0
- PyTorch version: 1.12.0+cu113
```
Note that the previous experiments were run with __vanilla ONNX__ models exported directly from the exporter. If you are interested in __further acceleration__, you can use `ORTOptimizer` to optimize the graph and convert your model to FP16 if you have a GPU with mixed precision capabilities.
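For instance, here is a minimal sketch of that workflow (the checkpoint and save directory are placeholder choices), using `ORTOptimizer` with an `OptimizationConfig` that enables GPU-oriented fusions and FP16:

```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification, ORTOptimizer
>>> from optimum.onnxruntime.configuration import OptimizationConfig

# Export the model to ONNX and create an optimizer for it
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
...     "distilbert-base-uncased-finetuned-sst-2-english",
...     export=True,
... )
>>> optimizer = ORTOptimizer.from_pretrained(ort_model)

# Enable GPU-oriented fusions and FP16 weights/activations
>>> optimization_config = OptimizationConfig(optimization_level=2, optimize_for_gpu=True, fp16=True)
>>> optimized_path = optimizer.optimize(save_dir="distilbert_optimized_fp16", optimization_config=optimization_config)

# Reload the optimized model (saved as model_optimized.onnx) on the CUDA execution provider
>>> optimized_model = ORTModelForSequenceClassification.from_pretrained(
...     "distilbert_optimized_fp16",
...     file_name="model_optimized.onnx",
...     provider="CUDAExecutionProvider",
... )
```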
## TensorrtExecutionProvider
TensorRT uses its own set of optimizations, and **generally does not support the optimizations from [`~onnxruntime.ORTOptimizer`]**. We therefore recommend to use the original ONNX models when using TensorrtExecutionProvider ([reference](https://github.com/microsoft/onnxruntime/issues/10905#issuecomment-1072649358)).
### TensorRT installation
The easiest way to use TensorRT as the execution provider for models optimized through 🤗 Optimum is with the available ONNX Runtime `TensorrtExecutionProvider`.
In order to use 🤗 Optimum with TensorRT in a local environment, we recommend following the NVIDIA installation guides:
* CUDA toolkit: https://docs.nvidia.com/cuda/cuda-installation-guide-linux/index.html
* cuDNN: https://docs.nvidia.com/deeplearning/cudnn/install-guide/index.html
* TensorRT: https://docs.nvidia.com/deeplearning/tensorrt/install-guide/index.html
For TensorRT, we recommend the Tar File Installation method. Alternatively, TensorRT may be installable with `pip` by following [these instructions](https://github.com/microsoft/onnxruntime/issues/9986).
Once the required packages are installed, the following environment variables need to be set with the appropriate paths for ONNX Runtime to detect TensorRT installation:
```bash
export CUDA_PATH=/usr/local/cuda
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/cuda-x.x/lib64:/path/to/TensorRT-8.x.x/lib
```
### Checking the TensorRT installation is successful
Before going further, run the following sample code to check whether the install was successful:
```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> from transformers import AutoTokenizer
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "philschmid/tiny-bert-sst2-distilled",
... export=True,
... provider="TensorrtExecutionProvider",
... )
>>> tokenizer = AutoTokenizer.from_pretrained("philschmid/tiny-bert-sst2-distilled")
>>> inp = tokenizer("expectations were low, actual enjoyment was high", return_tensors="pt", padding=True)
>>> result = ort_model(**inp)
>>> assert ort_model.providers == ["TensorrtExecutionProvider", "CUDAExecutionProvider", "CPUExecutionProvider"]
```
In case this code runs gracefully, congratulations, the installation is successful!
In case the above `assert` fails, or you encounter the following warning
```
Failed to create TensorrtExecutionProvider. Please reference https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#requirements to ensure all dependencies are met.
```
something is wrong with the TensorRT or ONNX Runtime installation.
### TensorRT engine build and warmup
TensorRT requires building its [inference engine](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#build-phase) ahead of inference, which takes some time due to model optimization and node fusion. To avoid rebuilding the engine every time the model is loaded, ONNX Runtime provides a pair of options to save the engine: `trt_engine_cache_enable` and `trt_engine_cache_path`.
We recommend setting these two provider options when using the TensorRT execution provider. The usage is as follows, where [`optimum/gpt2`](https://huggingface.co/optimum/gpt2) is an ONNX model converted from PyTorch using the [Optimum ONNX exporter](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model):
```python
>>> from optimum.onnxruntime import ORTModelForCausalLM
>>> provider_options = {
... "trt_engine_cache_enable": True,
... "trt_engine_cache_path": "tmp/trt_cache_gpt2_example"
... }
# the TensorRT engine is not built here, it will be when doing inference
>>> ort_model = ORTModelForCausalLM.from_pretrained(
... "optimum/gpt2",
... use_cache=False,
... provider="TensorrtExecutionProvider",
... provider_options=provider_options
... )
```
TensorRT builds its engine depending on specified input shapes. Unfortunately, in the [current ONNX Runtime implementation](https://github.com/microsoft/onnxruntime/blob/613920d6c5f53a8e5e647c5f1dcdecb0a8beef31/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc#L1677-L1688) (references: [1](https://github.com/microsoft/onnxruntime/issues/13559), [2](https://github.com/microsoft/onnxruntime/issues/13851)), the engine is rebuilt every time an input has a shape smaller than the previously smallest encountered shape, and conversely if the input has a shape larger than the previously largest encountered shape. For example, if a model takes `(batch_size, input_ids)` as inputs, and the model takes successively the inputs:
1. `input.shape: (4, 5) --> the engine is built (first input)`
2. `input.shape: (4, 10) --> engine rebuilt (10 larger than 5)`
3. `input.shape: (4, 7) --> no rebuild (5 <= 7 <= 10)`
4. `input.shape: (4, 12) --> engine rebuilt (12 larger than 10)`
5. `input.shape: (4, 3) --> engine rebuilt (3 smaller than 5)`
One big issue is that building the engine can be time-consuming, especially for large models. Therefore, as a workaround, one recommendation is to **first build the TensorRT engine with an input of small shape, and then with an input of large shape to have an engine valid for all shapes in between**. This avoids rebuilding the engine for new small and large shapes, which is unwanted once the model is deployed for inference.
Passing the engine cache path in the provider options, the engine can therefore be built once and for all and used fully for inference thereafter.
For example, for text generation, the engine can be built with:
```python
>>> import os
>>> from transformers import AutoTokenizer
>>> from optimum.onnxruntime import ORTModelForCausalLM
>>> os.makedirs("tmp/trt_cache_gpt2_example", exist_ok=True)
>>> provider_options = {
... "trt_engine_cache_enable": True,
... "trt_engine_cache_path": "tmp/trt_cache_gpt2_example"
... }
>>> ort_model = ORTModelForCausalLM.from_pretrained(
... "optimum/gpt2",
... use_cache=False,
... provider="TensorrtExecutionProvider",
... provider_options=provider_options,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("optimum/gpt2")
>>> print("Building engine for a short sequence...") # doctest: +IGNORE_RESULT
>>> text = ["short"]
>>> encoded_input = tokenizer(text, return_tensors="pt").to("cuda")
>>> output = ort_model(**encoded_input)
>>> print("Building engine for a long sequence...") # doctest: +IGNORE_RESULT
>>> text = [" a very long input just for demo purpose, this is very long" * 10]
>>> encoded_input = tokenizer(text, return_tensors="pt").to("cuda")
>>> output = ort_model(**encoded_input)
```
The engine is stored as:
![TensorRT engine cache folder](https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/tensorrt_cache.png)
Once the engine is built, the cache can be reloaded and generation does not need to rebuild the engine:
```python
>>> from transformers import AutoTokenizer
>>> from optimum.onnxruntime import ORTModelForCausalLM
>>> provider_options = {
... "trt_engine_cache_enable": True,
... "trt_engine_cache_path": "tmp/trt_cache_gpt2_example"
... }
>>> ort_model = ORTModelForCausalLM.from_pretrained(
... "optimum/gpt2",
... use_cache=False,
... provider="TensorrtExecutionProvider",
... provider_options=provider_options,
... )
>>> tokenizer = AutoTokenizer.from_pretrained("optimum/gpt2")
>>> text = ["Replace me by any text you'd like."]
>>> encoded_input = tokenizer(text, return_tensors="pt").to("cuda")
>>> for i in range(3):
... output = ort_model.generate(**encoded_input)
... print(tokenizer.decode(output[0])) # doctest: +IGNORE_RESULT
```
#### Warmup
Once the engine is built, it is recommended to perform **one or a few warmup steps** before inference, as the first inference runs have [some overhead](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#trtexec-flags).
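For example, a minimal warmup sketch (reusing the `ort_model` and `tokenizer` from the snippet above) could look like this:

```python
# A few throwaway generations so that later requests do not pay the first-run overhead
>>> warmup_inputs = tokenizer(["warmup"], return_tensors="pt").to("cuda")
>>> for i in range(3):
...     _ = ort_model.generate(**warmup_inputs)
```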
### Use TensorRT execution provider with floating-point models
For non-quantized models, the use is straightforward, by simply using the `provider` argument in `ORTModel.from_pretrained()`. For example:
```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "distilbert-base-uncased-finetuned-sst-2-english",
... export=True,
... provider="TensorrtExecutionProvider",
... )
```
[As previously for `CUDAExecutionProvider`](#use-cuda-execution-provider-with-floatingpoint-models), by passing the session option `log_severity_level = 0` (verbose), we can check in the logs whether all nodes are indeed placed on the TensorRT execution provider or not:
```
2022-09-22 14:12:48.371513741 [V:onnxruntime:, session_state.cc:1188 VerifyEachNodeIsAssignedToAnEp] All nodes have been placed on [TensorrtExecutionProvider]
```
The model can then be used with the common 🤗 Transformers API for inference and evaluation, such as [pipelines](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/pipelines).
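For instance, here is a minimal sketch reusing the `ort_model` loaded above (with a matching tokenizer) in a text-classification pipeline:

```python
>>> from optimum.pipelines import pipeline
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased-finetuned-sst-2-english")
>>> pipe = pipeline(task="text-classification", model=ort_model, tokenizer=tokenizer, device="cuda:0")
>>> result = pipe("Both the music and visual were astounding, not to mention the actors performance.")
>>> print(result)  # doctest: +IGNORE_RESULT
```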
### Use TensorRT execution provider with quantized models
When it comes to quantized models, TensorRT only supports models that use [**static** quantization](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#enable_int8_c) with [**symmetric quantization** for weights and activations](https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#intro-quantization).
🤗 Optimum provides a quantization config ready to be used with [`~onnxruntime.ORTQuantizer`] with the constraints of TensorRT quantization:
```python
>>> from optimum.onnxruntime import AutoQuantizationConfig
>>> qconfig = AutoQuantizationConfig.tensorrt(per_channel=False)
```
Using this `qconfig`, static quantization can be performed as explained in the [static quantization guide](quantization#static-quantization-example).
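As a condensed, hedged sketch of that workflow (the checkpoint, dataset name, column name and sample count below are illustrative placeholders, not the only valid choices):

```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification, ORTQuantizer
>>> from optimum.onnxruntime.configuration import AutoCalibrationConfig, AutoQuantizationConfig
>>> from transformers import AutoTokenizer

>>> model_id = "distilbert-base-uncased-finetuned-sst-2-english"
>>> tokenizer = AutoTokenizer.from_pretrained(model_id)

# Export the model to ONNX and create a quantizer for it
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(model_id, export=True)
>>> quantizer = ORTQuantizer.from_pretrained(ort_model)

# Quantization configuration respecting the TensorRT constraints (static, symmetric)
>>> qconfig = AutoQuantizationConfig.tensorrt(per_channel=False)

# Small calibration set used to compute the activation quantization ranges
>>> calibration_dataset = quantizer.get_calibration_dataset(
...     "sst2",
...     preprocess_function=lambda examples: tokenizer(examples["sentence"], padding="max_length", truncation=True),
...     num_samples=64,
...     dataset_split="train",
... )
>>> calibration_config = AutoCalibrationConfig.minmax(calibration_dataset)
>>> ranges = quantizer.fit(
...     dataset=calibration_dataset,
...     calibration_config=calibration_config,
...     operators_to_quantize=qconfig.operators_to_quantize,
...     force_symmetric_range=True,  # TensorRT requires symmetric activation ranges
... )

# Apply static quantization; this writes model_quantized.onnx in the save directory
>>> quantized_path = quantizer.quantize(
...     save_dir="distilbert_sst2_tensorrt_int8",
...     calibration_tensors_range=ranges,
...     quantization_config=qconfig,
... )
```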
In the code sample below, after performing static quantization, the resulting model is loaded into the [`~onnxruntime.ORTModel`] class using TensorRT as the execution provider. ONNX Runtime graph optimization needs to be disabled for the model to be consumed and optimized by TensorRT, and the fact that INT8 operations are used needs to be specified to TensorRT.
```python
>>> import onnxruntime
>>> from transformers import AutoTokenizer
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> session_options = onnxruntime.SessionOptions()
>>> session_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL
>>> tokenizer = AutoTokenizer.from_pretrained("fxmarty/distilbert-base-uncased-sst2-onnx-int8-for-tensorrt")
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(
... "fxmarty/distilbert-base-uncased-sst2-onnx-int8-for-tensorrt",
... provider="TensorrtExecutionProvider",
... session_options=session_options,
... provider_options={"trt_int8_enable": True},
... )
>>> inp = tokenizer("TensorRT is a bit painful to use, but at the end of day it runs smoothly and blazingly fast!", return_tensors="np")
>>> res = ort_model(**inp)
>>> print(res)
>>> print(ort_model.config.id2label[res.logits[0].argmax()])
>>> # SequenceClassifierOutput(loss=None, logits=array([[-0.545066 , 0.5609764]], dtype=float32), hidden_states=None, attentions=None)
>>> # POSITIVE
```
The model can then be used with the common 🤗 Transformers API for inference and evaluation, such as [pipelines](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/pipelines).
### TensorRT limitations for quantized models
As highlighted in the previous section, TensorRT supports only a limited range of quantized models:
* Static quantization only
* Weights and activations quantization ranges are symmetric
* Weights need to be stored in float32 in the ONNX model, thus there is no storage space saving from quantization. TensorRT indeed requires inserting full Quantize + Dequantize pairs. Normally, weights would be stored in fixed-point 8-bit format and only a `DequantizeLinear` would be applied on the weights.
In case `provider="TensorrtExecutionProvider"` is passed and the model has not been quantized strictly following these constraints, various errors may be raised, and the error messages can be unclear.
### Observed time gains
The NVIDIA Nsight Systems tool can be used to profile the execution time on GPU. Before profiling or measuring latency/throughput, it is good practice to perform a few **warmup steps**.
Coming soon!
| optimum/docs/source/onnxruntime/usage_guides/gpu.mdx/0 | {
"file_path": "optimum/docs/source/onnxruntime/usage_guides/gpu.mdx",
"repo_id": "optimum",
"token_count": 7219
} | 320 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Question answering
The script [`run_qa.py`](https://github.com/huggingface/optimum/blob/main/examples/onnxruntime/optimization/question-answering/run_qa.py)
allows us to apply graph optimizations using [ONNX Runtime](https://github.com/microsoft/onnxruntime) for question answering tasks.
Note that if your dataset contains samples with no possible answers (like SQuAD version 2), you need to pass along
the flag `--version_2_with_negative`.
The following example applies graph optimizations on a DistilBERT fine-tuned on the SQuAD1.0 dataset. Here the optimization level is set to 1, enabling basic optimizations such as redundant node elimination and constant folding. A higher optimization level will result in a hardware-dependent optimized graph.
```bash
python run_qa.py \
--model_name_or_path distilbert-base-uncased-distilled-squad \
--dataset_name squad \
--optimization_level 1 \
--do_eval \
--output_dir /tmp/optimized_distilbert_squad
```
In order to apply dynamic or static quantization, `quantization_approach` must be set to respectively `dynamic` or `static`.
| optimum/examples/onnxruntime/optimization/question-answering/README.md/0 | {
"file_path": "optimum/examples/onnxruntime/optimization/question-answering/README.md",
"repo_id": "optimum",
"token_count": 474
} | 321 |
#!/usr/bin/env python
# coding=utf-8
# Copyright The HuggingFace Team and The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for multiple choice.
"""
# You can also adapt this script on your own multiple choice task. Pointers for this are left as comments.
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from functools import partial
from itertools import chain
from typing import Optional
import datasets
import numpy as np
import transformers
from datasets import load_dataset
from onnxruntime.quantization import QuantFormat, QuantizationMode, QuantType
from transformers import AutoTokenizer, HfArgumentParser, TrainingArguments
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.utils import check_min_version
from optimum.onnxruntime import ORTModelForMultipleChoice, ORTQuantizer
from optimum.onnxruntime.configuration import AutoCalibrationConfig, QuantizationConfig
from optimum.onnxruntime.model import ORTModel
from optimum.onnxruntime.preprocessors import QuantizationPreprocessor
from optimum.onnxruntime.preprocessors.passes import (
ExcludeGeLUNodes,
ExcludeLayerNormNodes,
ExcludeNodeAfter,
ExcludeNodeFollowedBy,
)
# Will error if the minimal version of Transformers is not installed. The version of transformers must be >= 4.19.0
# as the export to onnx of multiple choice topologies was added in this release. Remove at your own risks.
check_min_version("4.19.0")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=1024,
metadata={
"help": "The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class OptimizationArguments:
"""
Arguments pertaining to what type of optimization we are going to apply on the model.
"""
quantization_approach: str = field(
default="dynamic",
metadata={"help": "The quantization approach. Supported approach are static and dynamic."},
)
per_channel: bool = field(
default=False,
metadata={"help": "Whether to quantize the weights per channel."},
)
reduce_range: bool = field(
default=False,
metadata={
"help": "Whether to quantize the weights with 7-bits. It may improve the accuracy for some models running "
"on non-VNNI machine, especially for per-channel mode."
},
)
calibration_method: str = field(
default="minmax",
metadata={
"help": "The method chosen to calculate the activation quantization parameters using the calibration "
"dataset. Current supported calibration methods are minmax, entropy and percentile."
},
)
num_calibration_samples: int = field(
default=100,
metadata={"help": "Number of examples to use for the calibration step resulting from static quantization."},
)
num_calibration_shards: int = field(
default=1,
metadata={
"help": "How many shards to split the calibration dataset into. Useful for the entropy and percentile "
"calibration method."
},
)
calibration_batch_size: int = field(
default=8,
metadata={"help": "The batch size for the calibration step."},
)
calibration_histogram_percentile: float = field(
default=99.999,
metadata={"help": "The percentile used for the percentile calibration method."},
)
calibration_moving_average: bool = field(
default=False,
metadata={
"help": "Whether to compute the moving average of the minimum and maximum values for the minmax "
"calibration method."
},
)
calibration_moving_average_constant: float = field(
default=0.01,
metadata={
"help": "Constant smoothing factor to use when computing the moving average of the minimum and maximum "
"values. Effective only when the selected calibration method is minmax and `calibration_moving_average` is "
"set to True."
},
)
execution_provider: str = field(
default="CPUExecutionProvider",
metadata={"help": "ONNX Runtime execution provider to use for inference."},
)
@dataclass
class OnnxExportArguments:
"""
Arguments to decide how the ModelProto will be saved.
"""
    # TODO: currently onnxruntime puts external data in a different path than the model proto, which will cause problems when re-loading it.
# https://github.com/microsoft/onnxruntime/issues/12576
use_external_data_format: bool = field(
default=False,
metadata={"help": "Whether to use external data format to store model whose size is >= 2Gb."},
)
def main():
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser(
(ModelArguments, DataTrainingArguments, TrainingArguments, OptimizationArguments, OnnxExportArguments)
)
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args, optim_args, onnx_export_args = parser.parse_json_file(
json_file=os.path.abspath(sys.argv[1])
)
else:
model_args, data_args, training_args, optim_args, onnx_export_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Optimization with the following parameters {optim_args}")
if os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
os.makedirs(training_args.output_dir, exist_ok=True)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.train_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag",
"regular",
cache_dir=model_args.cache_dir,
use_auth_token=True if model_args.use_auth_token else None,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
# Preprocessing the datasets.
def preprocess_function(examples, tokenizer: PreTrainedTokenizerBase):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = list(chain(*first_sentences))
second_sentences = list(chain(*second_sentences))
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=min(data_args.max_seq_length, tokenizer.model_max_length),
padding="max_length",
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name or model_args.model_name_or_path)
apply_static_quantization = optim_args.quantization_approach == "static"
# Create the quantization configuration containing all the quantization parameters
qconfig = QuantizationConfig(
is_static=apply_static_quantization,
format=QuantFormat.QDQ if apply_static_quantization else QuantFormat.QOperator,
mode=QuantizationMode.QLinearOps if apply_static_quantization else QuantizationMode.IntegerOps,
activations_dtype=QuantType.QInt8 if apply_static_quantization else QuantType.QUInt8,
weights_dtype=QuantType.QInt8,
per_channel=optim_args.per_channel,
reduce_range=optim_args.reduce_range,
operators_to_quantize=["MatMul", "Add"],
)
# Export the model
model = ORTModelForMultipleChoice.from_pretrained(model_args.model_name_or_path, export=True)
# Create the quantizer
quantizer = ORTQuantizer.from_pretrained(model)
ranges = None
quantization_preprocessor = None
if apply_static_quantization:
# Preprocess the calibration dataset
if "train" not in raw_datasets:
raise ValueError("Static quantization requires a train dataset for calibration")
calibration_dataset = raw_datasets["train"]
if optim_args.num_calibration_samples is not None:
num_calibration_samples = min(len(calibration_dataset), optim_args.num_calibration_samples)
calibration_dataset = calibration_dataset.select(range(num_calibration_samples))
with training_args.main_process_first(desc="Running tokenizer on the calibration dataset"):
calibration_dataset = calibration_dataset.map(
partial(preprocess_function, tokenizer=tokenizer),
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Remove the unnecessary columns of the calibration dataset before the calibration step
calibration_dataset = quantizer.clean_calibration_dataset(calibration_dataset)
# Create the calibration configuration given the selected calibration method
if optim_args.calibration_method == "entropy":
calibration_config = AutoCalibrationConfig.entropy(calibration_dataset)
elif optim_args.calibration_method == "percentile":
calibration_config = AutoCalibrationConfig.percentiles(
calibration_dataset,
percentile=optim_args.calibration_histogram_percentile,
)
else:
calibration_config = AutoCalibrationConfig.minmax(
calibration_dataset,
optim_args.calibration_moving_average,
optim_args.calibration_moving_average_constant,
)
if not 1 <= optim_args.num_calibration_shards <= len(calibration_dataset):
raise ValueError(
f"Invalid value of number of shards {optim_args.num_calibration_shards} chosen to split the calibration"
f" dataset, should be higher than 0 and lower or equal to the number of samples "
f"{len(calibration_dataset)}."
)
for i in range(optim_args.num_calibration_shards):
shard = calibration_dataset.shard(optim_args.num_calibration_shards, i)
quantizer.partial_fit(
dataset=shard,
calibration_config=calibration_config,
operators_to_quantize=qconfig.operators_to_quantize,
batch_size=optim_args.calibration_batch_size,
use_external_data_format=onnx_export_args.use_external_data_format,
)
ranges = quantizer.compute_ranges()
# Create a quantization preprocessor to determine the nodes to exclude when applying static quantization
quantization_preprocessor = QuantizationPreprocessor()
# Exclude the nodes constituting LayerNorm
quantization_preprocessor.register_pass(ExcludeLayerNormNodes())
# Exclude the nodes constituting GELU
quantization_preprocessor.register_pass(ExcludeGeLUNodes())
# Exclude the residual connection Add nodes
quantization_preprocessor.register_pass(ExcludeNodeAfter("Add", "Add"))
# Exclude the Add nodes following the Gather operator
quantization_preprocessor.register_pass(ExcludeNodeAfter("Gather", "Add"))
# Exclude the Add nodes followed by the Softmax operator
quantization_preprocessor.register_pass(ExcludeNodeFollowedBy("Add", "Softmax"))
# Apply quantization on the model
quantizer.quantize(
save_dir=training_args.output_dir,
calibration_tensors_range=ranges,
quantization_config=qconfig,
preprocessor=quantization_preprocessor,
use_external_data_format=onnx_export_args.use_external_data_format,
)
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Preprocess the evaluation dataset
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="Running tokenizer on the validation dataset"):
eval_dataset = eval_dataset.map(
partial(preprocess_function, tokenizer=tokenizer),
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
ort_model = ORTModel(
os.path.join(training_args.output_dir, "model_quantized.onnx"),
execution_provider=optim_args.execution_provider,
compute_metrics=compute_metrics,
label_names=["label"],
)
outputs = ort_model.evaluation_loop(eval_dataset)
# Save evaluation metrics
with open(os.path.join(training_args.output_dir, "eval_results.json"), "w") as f:
json.dump(outputs.metrics, f, indent=4, sort_keys=True)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| optimum/examples/onnxruntime/quantization/multiple-choice/run_swag.py/0 | {
"file_path": "optimum/examples/onnxruntime/quantization/multiple-choice/run_swag.py",
"repo_id": "optimum",
"token_count": 7407
} | 322 |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library models for sequence to sequence.
"""
# You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments.
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
import transformers
from datasets import load_dataset
from filelock import FileLock
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
MBart50Tokenizer,
MBart50TokenizerFast,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry
from transformers.utils.versions import require_version
from optimum.onnxruntime import ORTSeq2SeqTrainer, ORTSeq2SeqTrainingArguments
# Might error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.34.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt")
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
# A list of all multilingual tokenizer which require lang attribute.
MULTILINGUAL_TOKENIZERS = [MBartTokenizer, MBartTokenizerFast, MBart50Tokenizer, MBart50TokenizerFast]
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
)
},
)
use_auth_token: bool = field(
default=None,
metadata={
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will "
"execute code present on the Hub on your local machine."
)
},
)
resize_position_embeddings: Optional[bool] = field(
default=None,
metadata={
"help": (
"Whether to automatically resize the position embeddings if `max_source_length` exceeds "
"the model's position embeddings."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
lang: Optional[str] = field(default=None, metadata={"help": "Language id for summarization."})
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
text_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."},
)
summary_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."}
)
validation_file: Optional[str] = field(
default=None,
metadata={
"help": (
"An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
)
},
)
test_file: Optional[str] = field(
default=None,
metadata={
"help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)."
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_source_length: Optional[int] = field(
default=1024,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to model maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
)
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
source_prefix: Optional[str] = field(
default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."}
)
forced_bos_token: Optional[str] = field(
default=None,
metadata={
"help": (
"The token to force as the first generated token after the decoder_start_token_id. "
"Useful for multilingual models like mBART where the first generated token"
"needs to be the target language token (Usually it is the target language token)"
)
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training, validation, or test file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
summarization_name_mapping = {
"amazon_reviews_multi": ("review_body", "review_title"),
"big_patent": ("description", "abstract"),
"cnn_dailymail": ("article", "highlights"),
"orange_sum": ("text", "summary"),
"pn_summary": ("article", "summary"),
"psc": ("extract_text", "summary_text"),
"samsum": ("dialogue", "summary"),
"thaisum": ("body", "summary"),
"xglue": ("news_body", "news_title"),
"xsum": ("document", "summary"),
"wiki_summary": ("article", "highlights"),
"multi_news": ("document", "summary"),
}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, ORTSeq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if model_args.use_auth_token is not None:
warnings.warn("The `use_auth_token` argument is deprecated and will be removed in v4.34.", FutureWarning)
if model_args.token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
model_args.token = model_args.use_auth_token
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_summarization", model_args, data_args)
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
    )
logger.info(f"Training/evaluation parameters {training_args}")
if data_args.source_prefix is None and model_args.model_name_or_path in [
"t5-small",
"t5-base",
"t5-large",
"t5-3b",
"t5-11b",
]:
logger.warning(
"You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with "
"`--source_prefix 'summarize: ' `"
)
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full texts and the second column for the
# summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
if isinstance(tokenizer, MBartTokenizer):
model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.lang]
else:
model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.lang)
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
if (
hasattr(model.config, "max_position_embeddings")
and model.config.max_position_embeddings < data_args.max_source_length
):
if model_args.resize_position_embeddings is None:
logger.warning(
"Increasing the model's number of position embedding vectors from"
f" {model.config.max_position_embeddings} to {data_args.max_source_length}."
)
model.resize_position_embeddings(data_args.max_source_length)
elif model_args.resize_position_embeddings:
model.resize_position_embeddings(data_args.max_source_length)
else:
raise ValueError(
f"`--max_source_length` is set to {data_args.max_source_length}, but the model only has"
f" {model.config.max_position_embeddings} position encodings. Consider either reducing"
f" `--max_source_length` to {model.config.max_position_embeddings} or to automatically resize the"
" model's position encodings by passing `--resize_position_embeddings`."
)
prefix = data_args.source_prefix if data_args.source_prefix is not None else ""
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
if isinstance(tokenizer, tuple(MULTILINGUAL_TOKENIZERS)):
assert (
data_args.lang is not None
), f"{tokenizer.__class__.__name__} is a multilingual tokenizer which requires --lang argument"
tokenizer.src_lang = data_args.lang
tokenizer.tgt_lang = data_args.lang
# For multilingual translation models like mBART-50 and M2M100 we need to force the target language token
# as the first generated token. We ask the user to explicitly provide this as --forced_bos_token argument.
forced_bos_token_id = (
tokenizer.lang_code_to_id[data_args.forced_bos_token] if data_args.forced_bos_token is not None else None
)
model.config.forced_bos_token_id = forced_bos_token_id
# Get the column names for input/target.
dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None)
if data_args.text_column is None:
text_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
text_column = data_args.text_column
if text_column not in column_names:
raise ValueError(
f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.summary_column is None:
summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
summary_column = data_args.summary_column
if summary_column not in column_names:
raise ValueError(
f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_target_length for training.
max_target_length = data_args.max_target_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for "
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
def preprocess_function(examples):
# remove pairs where at least one record is None
inputs, targets = [], []
for i in range(len(examples[text_column])):
if examples[text_column][i] and examples[summary_column][i]:
inputs.append(examples[text_column][i])
targets.append(examples[summary_column][i])
inputs = [prefix + inp for inp in inputs]
model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True)
# Tokenize targets with the `text_target` keyword argument
labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if training_args.do_train:
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
if training_args.do_eval:
max_target_length = data_args.val_max_target_length
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if training_args.do_predict:
max_target_length = data_args.val_max_target_length
predict_dataset = raw_datasets["test"]
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
# Metric
metric = evaluate.load("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
# Replace -100s used for padding as we can't decode them
preds = np.where(preds != -100, preds, tokenizer.pad_token_id)
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
result = {k: round(v * 100, 4) for k, v in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
return result
# Override the decoding parameters of Seq2SeqTrainer
training_args.generation_max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_target_length
)
training_args.generation_num_beams = (
data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
)
# Initialize our Trainer
trainer = ORTSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics if training_args.predict_with_generate else None,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
if training_args.do_eval:
logger.info("*** Evaluate ***")
if isinstance(eval_dataset, dict):
metrics = {}
for eval_ds_name, eval_ds in eval_dataset.items():
dataset_metrics = trainer.evaluate(eval_dataset=eval_ds, metric_key_prefix=f"eval_{eval_ds_name}")
metrics.update(dataset_metrics)
else:
metrics = trainer.evaluate(metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Predict ***")
predict_results = trainer.predict(predict_dataset, metric_key_prefix="predict")
metrics = predict_results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if trainer.is_world_process_zero():
if training_args.predict_with_generate:
predictions = predict_results.predictions
predictions = np.where(predictions != -100, predictions, tokenizer.pad_token_id)
predictions = tokenizer.batch_decode(
predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
)
predictions = [pred.strip() for pred in predictions]
output_prediction_file = os.path.join(training_args.output_dir, "generated_predictions.txt")
with open(output_prediction_file, "w") as writer:
writer.write("\n".join(predictions))
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
if data_args.lang is not None:
kwargs["language"] = data_args.lang
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
return results
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| optimum/examples/onnxruntime/training/summarization/run_summarization.py/0 | {
"file_path": "optimum/examples/onnxruntime/training/summarization/run_summarization.py",
"repo_id": "optimum",
"token_count": 13698
} | 323 |
# Copyright 2022 The HuggingFace and Meta Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from transformers import PretrainedConfig
import torch
from ...utils import logging, recurse_getattr, recurse_setattr
KNOWN_ACTIVATION_ATTRIBUTES = ["hidden_act", "activation", "act_fn", "activation_function"]
KNOWN_POS_EMB_ATTRIBUTES = ["position_embedding_type"]
KNOWN_NUM_LAYERS = ["num_hidden_layers", "num_layers", "encoder_layers", "n_layers"]
SUPPORTED_ACTIVATION_FUNCTIONS = ["gelu", "relu", "gelu_new"]
USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS = ["quick_gelu"]
logger = logging.get_logger(__name__)
class BetterTransformerBaseLayer:
def __init__(
self,
config: "PretrainedConfig",
):
r"""
Base layer for `BetterTransformer` integration. This class is used to wrap all the necessary
components for the `BetterTransformer` integration.
Args:
config (`transformers.PretrainedConfig`):
The config of the model.
"""
self.norm_first = False
self.use_gelu = False
self.act_fn = None
self.pos_emb_type = None
self.num_heads = None
self.embed_dim = None
self.num_layers = None
self.original_layers_mapping = {}
self.module_mapping = None
        # Some models do not have certain attributes, so those keys need to be ignored
# e.g. whisper does not have self_attn.k_proj.bias but has self_attn.v_proj.bias & self_attn.q_proj.bias
self.keys_to_ignore = []
# Get activation function
for attr in KNOWN_ACTIVATION_ATTRIBUTES:
if hasattr(config, attr):
self.act_fn = getattr(config, attr)
break
# if act_fn not found in the config, fall back to the private `_get_activation_function` if available
if self.act_fn is None and hasattr(self, "_get_activation_function"):
self.act_fn = self._get_activation_function(config)
# Get pos emb type
for attr in KNOWN_POS_EMB_ATTRIBUTES:
if hasattr(config, attr):
self.pos_emb_type = getattr(config, attr)
break
# Get num_layers
for attr in KNOWN_NUM_LAYERS:
if hasattr(config, attr):
self.num_layers = getattr(config, attr)
break
def validate_bettertransformer(self):
r"""
A wrapper function to validate the `BetterTransformer` implementation. Implements most relevant checks
that are present in: https://github.com/pytorch/pytorch/blob/0fc7de398636f4b53e6c3fde38b4e48a5ff5b37d/torch/nn/modules/transformer.py#L457-L475
"""
# Sanity checks
if self.num_heads is None:
raise ValueError("Number of heads not set for `BetterTransformer` integration.")
if self.embed_dim is None:
raise ValueError("Embedding dimension not set for `BetterTransformer` integration.")
if self.norm2_eps is None or self.norm1_eps is None:
raise ValueError("`norm2_eps` and `norm1_eps` not set for `BetterTransformer` integration.")
# Check positional embedding
if self.pos_emb_type is not None and self.pos_emb_type != "absolute":
            raise ValueError(
                f"Positional embedding type {self.pos_emb_type} not supported for `BetterTransformer` integration"
            )
# Check norm1 epsilon and norm2 epsilon equality
if self.norm1_eps != self.norm2_eps:
raise ValueError("norm1_eps and norm2_eps must be equal for `BetterTransformer` integration.")
# Check activation function
if self.act_fn in USE_AT_OWN_RISK_ACTIVATION_FUNCTIONS:
logger.warning(
f"Overridding {self.act_fn} activation with gelu. Use the transformed model at your own risk, the output logits could be significantly different."
)
self.act_fn = "gelu"
elif self.act_fn not in SUPPORTED_ACTIVATION_FUNCTIONS:
            raise ValueError(
                f"Activation function {self.act_fn} not supported for `BetterTransformer` integration."
            )
self.use_gelu = (self.act_fn == "gelu") or (self.act_fn == "gelu_new")
# Check num_head is even
if self.num_heads % 2 == 1:
raise ValueError(
f"Number of heads {self.num_heads} is not supported"
" for `BetterTransformer` integration."
f" Number of heads must be even."
)
def _revert(self, module: torch.nn.Module) -> torch.nn.Module:
if self.module_mapping is not None:
if "" in self.module_mapping.values():
for bt_module_attr_name, value in self.module_mapping.items():
if value == "":
module = getattr(self, bt_module_attr_name)
return module
else:
raise NotImplementedError("replacing a submodule in revert is not supported")
for modified_layer_key_names, original_layer_key_names in self.original_layers_mapping.items():
if isinstance(original_layer_key_names, list):
current_weight = getattr(self, modified_layer_key_names)
# Split the current weight n chunks - this is useful to split
# the qkv layers into q, k, v layers for example.
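                # Illustration: a fused QKV weight of shape (3 * hidden_size, hidden_size) is split back into
                # three chunks of shape (hidden_size, hidden_size), one per entry in `original_layer_key_names`.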
split_index = current_weight.shape[0] // len(original_layer_key_names)
for i, subparam_name in enumerate(original_layer_key_names):
if recurse_getattr(module, subparam_name) is None:
# this is for example the case if bias=False is set for a nn.Linear layer
continue
if module not in self.keys_to_ignore:
# TODO: remove the clone once https://github.com/huggingface/transformers/pull/27314 & https://github.com/huggingface/safetensors/pull/379 are released.
# Safetensors is bugged when using views of tensors.
parameter = current_weight[i * split_index : (i + 1) * split_index].clone()
if isinstance(recurse_getattr(module, subparam_name), torch.nn.Parameter):
parameter = torch.nn.Parameter(parameter)
recurse_setattr(module, subparam_name, parameter)
elif isinstance(original_layer_key_names, str):
if recurse_getattr(module, original_layer_key_names) is None:
# this is for example the case if bias=False is set for a nn.Linear layer
continue
parameter = getattr(self, modified_layer_key_names)
if isinstance(recurse_getattr(module, original_layer_key_names), torch.nn.Parameter):
parameter = torch.nn.Parameter(parameter)
recurse_setattr(module, original_layer_key_names, parameter)
else:
                raise ValueError(
                    f"Invalid type {type(original_layer_key_names)} for `original_layers_mapping` values,"
                    " please use either `str` or `list`."
                )
return module
| optimum/optimum/bettertransformer/models/base.py/0 | {
"file_path": "optimum/optimum/bettertransformer/models/base.py",
"repo_id": "optimum",
"token_count": 3422
} | 324 |
# Register commands in the Optimum CLI from a subpackage
It is possible to register a command in the Optimum CLI, either as a command or a subcommand of an already existing command.
Steps to follow:
1. Create a command as a subclass of `optimum.commands.BaseOptimumCLICommand`.
2. Create a Python file under `optimum/commands/register/`, and define a `REGISTER_COMMANDS` list variable there.
3. Fill the `REGISTER_COMMANDS` as follows:
```python
# CustomCommand1 and CustomCommand2 could also be defined in this file actually.
from ..my_custom_commands import CustomCommand1, CustomCommand2
from ..export import ExportCommand
REGISTER_COMMANDS = [
# CustomCommand1 will be registered as a subcommand of the root Optimum CLI.
CustomCommand1,
# CustomCommand2 will be registered as a subcommand of the `optimum-cli export` command.
    (CustomCommand2, ExportCommand),
]
```
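
For reference, step 1 could look roughly like the sketch below. This is illustrative only: it assumes the `CommandInfo` helper exported alongside `BaseOptimumCLICommand` and the `parse_args`/`run` hooks of the base class; the command name, help text and `--name` argument are made up for the example.

```python
from optimum.commands import BaseOptimumCLICommand, CommandInfo


class CustomCommand1(BaseOptimumCLICommand):
    # Hypothetical name and help text, shown in `optimum-cli --help` once registered.
    COMMAND = CommandInfo(name="custom-command", help="Run my custom logic.")

    @staticmethod
    def parse_args(parser):
        # Hypothetical argument, only for illustration.
        parser.add_argument("--name", type=str, default="world", help="Who to greet.")

    def run(self):
        # `self.args` is assumed to hold the parsed CLI arguments.
        print(f"Hello, {self.args.name}!")
```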
| optimum/optimum/commands/register/README.md/0 | {
"file_path": "optimum/optimum/commands/register/README.md",
"repo_id": "optimum",
"token_count": 256
} | 325 |
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model export tasks manager."""
import importlib
import inspect
import itertools
import os
from functools import partial
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Type, Union
import huggingface_hub
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE
from packaging import version
from requests.exceptions import ConnectionError as RequestsConnectionError
from transformers import AutoConfig, PretrainedConfig, is_tf_available, is_torch_available
from transformers.utils import SAFE_WEIGHTS_NAME, TF2_WEIGHTS_NAME, WEIGHTS_NAME, logging
from ..utils import CONFIG_NAME
from ..utils.import_utils import is_onnx_available
if TYPE_CHECKING:
from .base import ExportConfig
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
if not is_torch_available() and not is_tf_available():
logger.warning(
"The export tasks are only supported for PyTorch or TensorFlow. You will not be able to export models"
" without one of these libraries installed."
)
if is_torch_available():
import torch
from transformers import PreTrainedModel
if is_tf_available():
from transformers import TFPreTrainedModel
ExportConfigConstructor = Callable[[PretrainedConfig], "ExportConfig"]
TaskNameToExportConfigDict = Dict[str, ExportConfigConstructor]
def is_backend_available(backend):
    backend_availability = {
        "onnx": is_onnx_available(),
        "tflite": is_tf_available(),
    }
    return backend_availability[backend]
def make_backend_config_constructor_for_task(config_cls: Type, task: str) -> ExportConfigConstructor:
if "-with-past" in task:
if not getattr(config_cls, "SUPPORTS_PAST", False):
raise ValueError(f"{config_cls} does not support tasks with past.")
constructor = partial(config_cls, use_past=True, task=task.replace("-with-past", ""))
else:
constructor = partial(config_cls, task=task)
return constructor
def supported_tasks_mapping(
*supported_tasks: Union[str, Tuple[str, Tuple[str, ...]]], **exporters: str
) -> Dict[str, TaskNameToExportConfigDict]:
"""
Generates the mapping between supported tasks and their corresponding `ExportConfig` for a given model, for
every backend.
Args:
supported_tasks (`Tuple[Union[str, Tuple[str, Tuple[str, ...]]]`):
The names of the supported tasks.
If some task is supported by only a subset of all the backends, it can be specified as follows:
```python
>>> ("multiple-choice", ("onnx",))
```
The line above means that the multiple-choice task will be supported only by the ONNX backend.
exporters (`Dict[str, str]`):
The export backend name -> config class name mapping. For instance:
```python
>>> exporters = { # doctest: +SKIP
... "onnx": "BertOnnxConfig",
... "tflite": "BertTFLiteConfig",
... ...
... }
```
Returns:
`Dict[str, TaskNameToExportConfigDict]`: The dictionary mapping a task to an `ExportConfig` constructor.
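    Example:
        Illustratively, a call like the one below returns one sub-dictionary per requested backend, where each
        value is a `functools.partial` constructor of the corresponding config class (the `tflite` entry is only
        built when TensorFlow is available):

        ```python
        >>> supported_tasks_mapping(  # doctest: +SKIP
        ...     "text-classification",
        ...     ("multiple-choice", ("onnx",)),
        ...     onnx="BertOnnxConfig",
        ...     tflite="BertTFLiteConfig",
        ... )
        {"onnx": {"text-classification": ..., "multiple-choice": ...}, "tflite": {"text-classification": ...}}
        ```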
"""
mapping = {}
for backend, config_cls_name in exporters.items():
if is_backend_available(backend):
config_cls = getattr(
importlib.import_module(f"optimum.exporters.{backend}.model_configs"), config_cls_name
)
mapping[backend] = {}
for task in supported_tasks:
if isinstance(task, tuple):
task, supported_backends_for_task = task
if backend not in supported_backends_for_task:
continue
config_constructor = make_backend_config_constructor_for_task(config_cls, task)
mapping[backend][task] = config_constructor
return mapping
def get_model_loaders_to_tasks(tasks_to_model_loaders: Dict[str, Union[str, Tuple[str]]]) -> Dict[str, str]:
"""
Reverses tasks_to_model_loaders while flattening the case where the same task maps to several
auto classes (e.g. automatic-speech-recognition).
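    For example (illustrative):

    ```python
    >>> get_model_loaders_to_tasks(  # doctest: +SKIP
    ...     {"automatic-speech-recognition": ("AutoModelForSpeechSeq2Seq", "AutoModelForCTC")}
    ... )
    {'AutoModelForSpeechSeq2Seq': 'automatic-speech-recognition', 'AutoModelForCTC': 'automatic-speech-recognition'}
    ```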
"""
model_loaders_to_tasks = {}
for task, model_loaders in tasks_to_model_loaders.items():
if isinstance(model_loaders, str):
model_loaders_to_tasks[model_loaders] = task
else:
model_loaders_to_tasks.update({model_loader_name: task for model_loader_name in model_loaders})
return model_loaders_to_tasks
class TasksManager:
"""
Handles the `task name -> model class` and `architecture -> configuration` mappings.
"""
# Torch model loaders
_TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {}
_DIFFUSERS_TASKS_TO_MODEL_LOADERS = {}
_TIMM_TASKS_TO_MODEL_LOADERS = {}
_LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP = {}
# TF model loaders
_TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS = {}
_LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP = {}
if is_torch_available():
# Refer to https://huggingface.co/datasets/huggingface/transformers-metadata/blob/main/pipeline_tags.json
# In case the same task (pipeline tag) may map to several loading classes, we use a tuple and the
# auto-class _model_mapping to determine the right one.
        # TODO: having several tasks pointing to the same auto-model class makes task auto-detection error-prone
        # for a Hub repo that has no pipeline_tag and no transformersInfo.pipeline_tag, as we then rely on
        # transformersInfo["auto_model"] and this dictionary.
_TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {
"audio-classification": "AutoModelForAudioClassification",
"audio-frame-classification": "AutoModelForAudioFrameClassification",
"audio-xvector": "AutoModelForAudioXVector",
"automatic-speech-recognition": ("AutoModelForSpeechSeq2Seq", "AutoModelForCTC"),
"conversational": ("AutoModelForCausalLM", "AutoModelForSeq2SeqLM"),
"depth-estimation": "AutoModelForDepthEstimation",
"feature-extraction": "AutoModel",
"fill-mask": "AutoModelForMaskedLM",
"image-classification": "AutoModelForImageClassification",
"image-segmentation": ("AutoModelForImageSegmentation", "AutoModelForSemanticSegmentation"),
"image-to-image": "AutoModelForImageToImage",
"image-to-text": "AutoModelForVision2Seq",
"mask-generation": "AutoModel",
"masked-im": "AutoModelForMaskedImageModeling",
"multiple-choice": "AutoModelForMultipleChoice",
"object-detection": "AutoModelForObjectDetection",
"question-answering": "AutoModelForQuestionAnswering",
"semantic-segmentation": "AutoModelForSemanticSegmentation",
"text-to-audio": ("AutoModelForTextToSpectrogram", "AutoModelForTextToWaveform"),
"text-generation": "AutoModelForCausalLM",
"text2text-generation": "AutoModelForSeq2SeqLM",
"text-classification": "AutoModelForSequenceClassification",
"token-classification": "AutoModelForTokenClassification",
"zero-shot-image-classification": "AutoModelForZeroShotImageClassification",
"zero-shot-object-detection": "AutoModelForZeroShotObjectDetection",
}
_DIFFUSERS_TASKS_TO_MODEL_LOADERS = {
"stable-diffusion": "StableDiffusionPipeline",
"stable-diffusion-xl": "StableDiffusionXLImg2ImgPipeline",
}
_TIMM_TASKS_TO_MODEL_LOADERS = {
"image-classification": "create_model",
}
_SENTENCE_TRANSFORMERS_TASKS_TO_MODEL_LOADERS = {
"feature-extraction": "SentenceTransformer",
"sentence-similarity": "SentenceTransformer",
}
_LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP = {
"diffusers": _DIFFUSERS_TASKS_TO_MODEL_LOADERS,
"sentence_transformers": _SENTENCE_TRANSFORMERS_TASKS_TO_MODEL_LOADERS,
"timm": _TIMM_TASKS_TO_MODEL_LOADERS,
"transformers": _TRANSFORMERS_TASKS_TO_MODEL_LOADERS,
}
if is_tf_available():
_TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS = {
"conversational": ("TFAutoModelForCausalLM", "TFAutoModelForSeq2SeqLM"),
"document-question-answering": "TFAutoModelForDocumentQuestionAnswering",
"feature-extraction": "TFAutoModel",
"fill-mask": "TFAutoModelForMaskedLM",
"text-generation": "TFAutoModelForCausalLM",
"image-classification": "TFAutoModelForImageClassification",
"text2text-generation": "TFAutoModelForSeq2SeqLM",
"text-classification": "TFAutoModelForSequenceClassification",
"token-classification": "TFAutoModelForTokenClassification",
"multiple-choice": "TFAutoModelForMultipleChoice",
"object-detection": "TFAutoModelForObjectDetection",
"question-answering": "TFAutoModelForQuestionAnswering",
"image-segmentation": "TFAutoModelForImageSegmentation",
"masked-im": "TFAutoModelForMaskedImageModeling",
"semantic-segmentation": "TFAutoModelForSemanticSegmentation",
"automatic-speech-recognition": "TFAutoModelForSpeechSeq2Seq",
"audio-classification": "TFAutoModelForAudioClassification",
"audio-frame-classification": "TFAutoModelForAudioFrameClassification",
"audio-xvector": "TFAutoModelForAudioXVector",
"image-to-text": "TFAutoModelForVision2Seq",
"zero-shot-image-classification": "TFAutoModelForZeroShotImageClassification",
"zero-shot-object-detection": "TFAutoModelForZeroShotObjectDetection",
}
_LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP = {
"transformers": _TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS,
}
_SYNONYM_TASK_MAP = {
"audio-ctc": "automatic-speech-recognition",
"causal-lm": "text-generation",
"causal-lm-with-past": "text-generation-with-past",
"default": "feature-extraction",
"default-with-past": "feature-extraction-with-past",
"masked-lm": "fill-mask",
"mask-generation": "feature-extraction",
"sentence-similarity": "feature-extraction",
"seq2seq-lm": "text2text-generation",
"seq2seq-lm-with-past": "text2text-generation-with-past",
"sequence-classification": "text-classification",
"speech2seq-lm": "automatic-speech-recognition",
"speech2seq-lm-with-past": "automatic-speech-recognition-with-past",
"summarization": "text2text-generation",
"text-to-speech": "text-to-audio",
"translation": "text2text-generation",
"vision2seq-lm": "image-to-text",
"zero-shot-classification": "text-classification",
"image-feature-extraction": "feature-extraction",
}
# Reverse dictionaries str -> str, where several model loaders may map to the same task
_LIBRARY_TO_MODEL_LOADERS_TO_TASKS_MAP = {
"diffusers": get_model_loaders_to_tasks(_DIFFUSERS_TASKS_TO_MODEL_LOADERS),
"sentence_transformers": get_model_loaders_to_tasks(_SENTENCE_TRANSFORMERS_TASKS_TO_MODEL_LOADERS),
"timm": get_model_loaders_to_tasks(_TIMM_TASKS_TO_MODEL_LOADERS),
"transformers": get_model_loaders_to_tasks(_TRANSFORMERS_TASKS_TO_MODEL_LOADERS),
}
_LIBRARY_TO_TF_MODEL_LOADERS_TO_TASKS_MAP = {
"transformers": get_model_loaders_to_tasks(_TRANSFORMERS_TASKS_TO_TF_MODEL_LOADERS),
}
_CUSTOM_CLASSES = {
("pt", "pix2struct", "image-to-text"): ("transformers", "Pix2StructForConditionalGeneration"),
("pt", "pix2struct", "visual-question-answering"): ("transformers", "Pix2StructForConditionalGeneration"),
("pt", "visual-bert", "question-answering"): ("transformers", "VisualBertForQuestionAnswering"),
# VisionEncoderDecoderModel is not registered in AutoModelForDocumentQuestionAnswering
("pt", "vision-encoder-decoder", "document-question-answering"): ("transformers", "VisionEncoderDecoderModel"),
}
    # TODO: why is feature-extraction-with-past here?
_ENCODER_DECODER_TASKS = (
"automatic-speech-recognition",
"document-question-answering",
"feature-extraction-with-past",
"image-to-text",
"text2text-generation",
"visual-question-answering",
)
_MODEL_TYPE_FOR_DEFAULT_CONFIG = {
"timm": "default-timm-config",
}
_DIFFUSERS_SUPPORTED_MODEL_TYPE = {
"clip-text-model": supported_tasks_mapping(
"feature-extraction",
onnx="CLIPTextOnnxConfig",
),
"clip-text-with-projection": supported_tasks_mapping(
"feature-extraction",
onnx="CLIPTextWithProjectionOnnxConfig",
),
"unet": supported_tasks_mapping(
"semantic-segmentation",
onnx="UNetOnnxConfig",
),
"vae-encoder": supported_tasks_mapping(
"semantic-segmentation",
onnx="VaeEncoderOnnxConfig",
),
"vae-decoder": supported_tasks_mapping(
"semantic-segmentation",
onnx="VaeDecoderOnnxConfig",
),
}
_TIMM_SUPPORTED_MODEL_TYPE = {
"default-timm-config": supported_tasks_mapping("image-classification", onnx="TimmDefaultOnnxConfig"),
}
_SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE = {
"clip": supported_tasks_mapping(
"feature-extraction",
"sentence-similarity",
onnx="SentenceTransformersCLIPOnnxConfig",
),
"transformer": supported_tasks_mapping(
"feature-extraction",
"sentence-similarity",
onnx="SentenceTransformersTransformerOnnxConfig",
),
}
# TODO: some models here support text-generation export but are not supported in ORTModelForCausalLM
# Set of model topologies we support associated to the tasks supported by each topology and the factory
# TODO: remove `-with-past` tasks and rather rely on `variant`.
_SUPPORTED_MODEL_TYPE = {
"audio-spectrogram-transformer": supported_tasks_mapping(
"feature-extraction",
"audio-classification",
onnx="ASTOnnxConfig",
),
"albert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="AlbertOnnxConfig",
tflite="AlbertTFLiteConfig",
),
"bart": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text2text-generation",
"text2text-generation-with-past",
"text-classification",
"question-answering",
onnx="BartOnnxConfig",
),
# BEiT cannot be used with the masked image modeling autoclass, so this task is excluded here
"beit": supported_tasks_mapping("feature-extraction", "image-classification", onnx="BeitOnnxConfig"),
"bert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for BERT
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="BertOnnxConfig",
tflite="BertTFLiteConfig",
),
# For big-bird and bigbird-pegasus being unsupported, refer to model_configs.py
# "big-bird": supported_tasks_mapping(
# "feature-extraction",
# "fill-mask",
# # the logic for text-generation is not supported for big-bird
# # "text-generation",
# "text-classification",
# "multiple-choice",
# "token-classification",
# "question-answering",
# onnx="BigBirdOnnxConfig",
# # TODO: check model_config.py to know why it cannot be enabled yet.
# # tflite="BigBirdTFLiteConfig",
# ),
# "bigbird-pegasus": supported_tasks_mapping(
# "feature-extraction",
# "feature-extraction-with-past",
# "text-generation",
# "text-generation-with-past",
# "text2text-generation",
# "text2text-generation-with-past",
# "text-classification",
# "question-answering",
# onnx="BigBirdPegasusOnnxConfig",
# ),
"blenderbot": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="BlenderbotOnnxConfig",
),
"blenderbot-small": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="BlenderbotSmallOnnxConfig",
),
"bloom": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
"token-classification",
onnx="BloomOnnxConfig",
),
"camembert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for camembert
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="CamembertOnnxConfig",
tflite="CamembertTFLiteConfig",
),
"clip": supported_tasks_mapping(
"feature-extraction",
"zero-shot-image-classification",
onnx="CLIPOnnxConfig",
),
"codegen": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
onnx="CodeGenOnnxConfig",
),
"convbert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="ConvBertOnnxConfig",
tflite="ConvBertTFLiteConfig",
),
"convnext": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="ConvNextOnnxConfig",
),
"convnextv2": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="ConvNextV2OnnxConfig",
),
"cvt": supported_tasks_mapping("feature-extraction", "image-classification", onnx="CvTOnnxConfig"),
"data2vec-text": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="Data2VecTextOnnxConfig",
),
"data2vec-vision": supported_tasks_mapping(
"feature-extraction",
"image-classification",
# ONNX doesn't support `adaptive_avg_pool2d` yet
# "semantic-segmentation",
onnx="Data2VecVisionOnnxConfig",
),
"data2vec-audio": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
"audio-frame-classification",
"audio-xvector",
onnx="Data2VecAudioOnnxConfig",
),
"deberta": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"token-classification",
"question-answering",
onnx="DebertaOnnxConfig",
tflite="DebertaTFLiteConfig",
),
"deberta-v2": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
("multiple-choice", ("onnx",)),
"token-classification",
"question-answering",
onnx="DebertaV2OnnxConfig",
tflite="DebertaV2TFLiteConfig",
),
"deit": supported_tasks_mapping(
"feature-extraction",
"image-classification",
"masked-im",
onnx="DeiTOnnxConfig",
),
"detr": supported_tasks_mapping(
"feature-extraction",
"object-detection",
"image-segmentation",
onnx="DetrOnnxConfig",
),
"distilbert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="DistilBertOnnxConfig",
tflite="DistilBertTFLiteConfig",
),
"donut": supported_tasks_mapping(
"image-to-text",
"image-to-text-with-past",
"document-question-answering",
"document-question-answering-with-past",
onnx="VisionEncoderDecoderOnnxConfig",
),
"donut-swin": supported_tasks_mapping(
"feature-extraction",
onnx="DonutSwinOnnxConfig",
),
"dpt": supported_tasks_mapping(
"feature-extraction",
"depth-estimation",
"image-segmentation",
"semantic-segmentation",
onnx="DptOnnxConfig",
),
"electra": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for electra
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="ElectraOnnxConfig",
tflite="ElectraTFLiteConfig",
),
"encoder-decoder": supported_tasks_mapping(
"text2text-generation",
"text2text-generation-with-past",
onnx="EncoderDecoderOnnxConfig",
),
"esm": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"token-classification",
onnx="EsmOnnxConfig",
),
"falcon": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"question-answering",
"text-generation",
"text-generation-with-past",
"token-classification",
onnx="FalconOnnxConfig",
),
"flaubert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="FlaubertOnnxConfig",
tflite="FlaubertTFLiteConfig",
),
"gemma": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="GemmaOnnxConfig",
),
"glpn": supported_tasks_mapping(
"feature-extraction",
"depth-estimation",
onnx="GlpnOnnxConfig",
),
"gpt2": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
"token-classification",
onnx="GPT2OnnxConfig",
),
"gpt-bigcode": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
"token-classification",
onnx="GPTBigCodeOnnxConfig",
),
"gptj": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"question-answering",
"text-classification",
onnx="GPTJOnnxConfig",
),
"gpt-neo": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="GPTNeoOnnxConfig",
),
"gpt-neox": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="GPTNeoXOnnxConfig",
),
"groupvit": supported_tasks_mapping(
"feature-extraction",
onnx="GroupViTOnnxConfig",
),
"hubert": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
onnx="HubertOnnxConfig",
),
"ibert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="IBertOnnxConfig",
),
"imagegpt": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="ImageGPTOnnxConfig",
),
"layoutlm": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"token-classification",
onnx="LayoutLMOnnxConfig",
),
# "layoutlmv2": supported_tasks_mapping(
# "feature-extraction",
# "question-answering",
# "text-classification",
# "token-classification",
# onnx="LayoutLMv2OnnxConfig",
# ),
"layoutlmv3": supported_tasks_mapping(
"feature-extraction",
"question-answering",
"text-classification",
"token-classification",
onnx="LayoutLMv3OnnxConfig",
),
"lilt": supported_tasks_mapping(
"feature-extraction",
"question-answering",
"text-classification",
"token-classification",
onnx="LiltOnnxConfig",
),
"levit": supported_tasks_mapping("feature-extraction", "image-classification", onnx="LevitOnnxConfig"),
"longt5": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="LongT5OnnxConfig",
),
# "longformer": supported_tasks_mapping(
# "feature-extraction",
# "fill-mask",
# "multiple-choice",
# "question-answering",
# "text-classification",
# "token-classification",
# onnx_config_cls="models.longformer.LongformerOnnxConfig",
# ),
"marian": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text2text-generation",
"text2text-generation-with-past",
"text-generation",
"text-generation-with-past",
onnx="MarianOnnxConfig",
),
"markuplm": supported_tasks_mapping(
"feature-extraction",
"text-classification",
"token-classification",
"question-answering",
onnx="MarkupLMOnnxConfig",
),
"mbart": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text2text-generation",
"text2text-generation-with-past",
"text-classification",
"question-answering",
onnx="MBartOnnxConfig",
),
"mistral": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="MistralOnnxConfig",
),
# TODO: enable once the missing operator is supported.
# "mctct": supported_tasks_mapping(
# "feature-extraction",
# "automatic-speech-recognition",
# onnx="MCTCTOnnxConfig",
# ),
"mobilebert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="MobileBertOnnxConfig",
tflite="MobileBertTFLiteConfig",
),
"mobilevit": supported_tasks_mapping(
"feature-extraction",
"image-classification",
"image-segmentation",
onnx="MobileViTOnnxConfig",
),
"mobilenet-v1": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="MobileNetV1OnnxConfig",
),
"mobilenet-v2": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="MobileNetV2OnnxConfig",
),
"mpnet": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="MPNetOnnxConfig",
tflite="MPNetTFLiteConfig",
),
"mpt": supported_tasks_mapping(
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="MPTOnnxConfig",
),
"mt5": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="MT5OnnxConfig",
),
"musicgen": supported_tasks_mapping(
"text-to-audio", # "variant" handles the "-with-past". We should generalize that.
onnx="MusicgenOnnxConfig",
),
"m2m-100": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="M2M100OnnxConfig",
),
"nystromformer": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"multiple-choice",
"question-answering",
"text-classification",
"token-classification",
onnx="NystromformerOnnxConfig",
),
"owlv2": supported_tasks_mapping(
"feature-extraction",
"zero-shot-object-detection",
onnx="OwlV2OnnxConfig",
),
"owlvit": supported_tasks_mapping(
"feature-extraction",
"zero-shot-object-detection",
onnx="OwlViTOnnxConfig",
),
"opt": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"question-answering",
"text-classification",
onnx="OPTOnnxConfig",
),
"qwen2": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="Qwen2OnnxConfig",
),
"llama": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="LlamaOnnxConfig",
),
"pegasus": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="PegasusOnnxConfig",
),
"perceiver": supported_tasks_mapping(
"fill-mask",
"image-classification",
"text-classification",
onnx="PerceiverOnnxConfig",
),
"phi": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text-generation",
"text-generation-with-past",
"text-classification",
onnx="PhiOnnxConfig",
),
"pix2struct": supported_tasks_mapping(
"image-to-text",
"image-to-text-with-past",
"visual-question-answering",
"visual-question-answering-with-past",
onnx="Pix2StructOnnxConfig",
),
"poolformer": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="PoolFormerOnnxConfig",
),
"regnet": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="RegNetOnnxConfig",
),
"resnet": supported_tasks_mapping(
"feature-extraction",
"image-classification",
onnx="ResNetOnnxConfig",
tflite="ResNetTFLiteConfig",
),
"roberta": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for roberta
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="RobertaOnnxConfig",
tflite="RobertaTFLiteConfig",
),
"roformer": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for roformer
# "text-generation",
"text-classification",
"token-classification",
"multiple-choice",
"question-answering",
"token-classification",
onnx="RoFormerOnnxConfig",
tflite="RoFormerTFLiteConfig",
),
"sam": supported_tasks_mapping(
"feature-extraction",
onnx="SamOnnxConfig",
),
"segformer": supported_tasks_mapping(
"feature-extraction",
"image-classification",
"image-segmentation",
"semantic-segmentation",
onnx="SegformerOnnxConfig",
),
"sew": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
onnx="SEWOnnxConfig",
),
"sew-d": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
onnx="SEWDOnnxConfig",
),
"speech-to-text": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"automatic-speech-recognition",
"automatic-speech-recognition-with-past",
onnx="Speech2TextOnnxConfig",
),
# TODO: SpeechT5 can also support audio-to-audio and automatic-speech-recognition.
"speecht5": supported_tasks_mapping(
"text-to-audio",
onnx="SpeechT5OnnxConfig",
),
"splinter": supported_tasks_mapping(
"feature-extraction",
"question-answering",
onnx="SplinterOnnxConfig",
),
"squeezebert": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="SqueezeBertOnnxConfig",
),
"swin": supported_tasks_mapping(
"feature-extraction",
"image-classification",
"masked-im",
onnx="SwinOnnxConfig",
),
"swin2sr": supported_tasks_mapping(
"feature-extraction",
"image-to-image",
onnx="Swin2srOnnxConfig",
),
"t5": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"text2text-generation",
"text2text-generation-with-past",
onnx="T5OnnxConfig",
),
"table-transformer": supported_tasks_mapping(
"feature-extraction",
"object-detection",
onnx="TableTransformerOnnxConfig",
),
"trocr": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"image-to-text",
"image-to-text-with-past",
onnx="TrOCROnnxConfig",
),
"unispeech": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
onnx="UniSpeechOnnxConfig",
),
"unispeech-sat": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
"audio-frame-classification",
"audio-xvector",
onnx="UniSpeechSATOnnxConfig",
),
"vision-encoder-decoder": supported_tasks_mapping(
"image-to-text",
"image-to-text-with-past",
"document-question-answering",
"document-question-answering-with-past",
onnx="VisionEncoderDecoderOnnxConfig",
),
"vit": supported_tasks_mapping(
"feature-extraction", "image-classification", "masked-im", onnx="ViTOnnxConfig"
),
"vits": supported_tasks_mapping(
"text-to-audio",
onnx="VitsOnnxConfig",
),
"wavlm": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
"audio-frame-classification",
"audio-xvector",
onnx="WavLMOnnxConfig",
),
"wav2vec2": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
"audio-frame-classification",
"audio-xvector",
onnx="Wav2Vec2OnnxConfig",
),
"wav2vec2-conformer": supported_tasks_mapping(
"feature-extraction",
"automatic-speech-recognition",
"audio-classification",
"audio-frame-classification",
"audio-xvector",
onnx="Wav2Vec2ConformerOnnxConfig",
),
"whisper": supported_tasks_mapping(
"feature-extraction",
"feature-extraction-with-past",
"audio-classification",
"automatic-speech-recognition",
"automatic-speech-recognition-with-past",
onnx="WhisperOnnxConfig",
),
"xlm": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for xlm
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="XLMOnnxConfig",
tflite="XLMTFLiteConfig",
),
"xlm-roberta": supported_tasks_mapping(
"feature-extraction",
"fill-mask",
# the logic for text-generation is not supported for xlm-roberta
# "text-generation",
"text-classification",
"multiple-choice",
"token-classification",
"question-answering",
onnx="XLMRobertaOnnxConfig",
tflite="XLMRobertaTFLiteConfig",
),
"yolos": supported_tasks_mapping(
"feature-extraction",
"object-detection",
onnx="YolosOnnxConfig",
),
}
_LIBRARY_TO_SUPPORTED_MODEL_TYPES = {
"diffusers": _DIFFUSERS_SUPPORTED_MODEL_TYPE,
"sentence_transformers": _SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE,
"timm": _TIMM_SUPPORTED_MODEL_TYPE,
"transformers": _SUPPORTED_MODEL_TYPE,
}
_UNSUPPORTED_CLI_MODEL_TYPE = {
"unet",
"vae-encoder",
"vae-decoder",
"clip-text-model",
"clip-text-with-projection",
"trocr", # TODO: why?
}
_SUPPORTED_CLI_MODEL_TYPE = (
set(_SUPPORTED_MODEL_TYPE.keys())
| set(_DIFFUSERS_SUPPORTED_MODEL_TYPE.keys())
| set(_TIMM_SUPPORTED_MODEL_TYPE.keys())
| set(_SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE.keys())
) - _UNSUPPORTED_CLI_MODEL_TYPE
@classmethod
def create_register(
cls, backend: str, overwrite_existing: bool = False
) -> Callable[[str, Tuple[str, ...]], Callable[[Type], Type]]:
"""
Creates a register function for the specified backend.
Args:
backend (`str`):
The name of the backend that the register function will handle.
overwrite_existing (`bool`, defaults to `False`):
Whether or not the register function is allowed to overwrite an already existing config.
Returns:
            `Callable[[str, Tuple[str, ...]], Callable[[Type], Type]]`: A decorator taking the model type and the
supported tasks.
Example:
```python
>>> register_for_new_backend = create_register("new-backend")
>>> @register_for_new_backend("bert", "text-classification", "token-classification")
>>> class BertNewBackendConfig(NewBackendConfig):
>>> pass
```
"""
def wrapper(
model_type: str, *supported_tasks: str, library_name: str = "transformers"
) -> Callable[[Type], Type]:
def decorator(config_cls: Type) -> Type:
supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[
library_name
] # This is a pointer.
mapping = supported_model_type_for_library.get(model_type, {})
mapping_backend = mapping.get(backend, {})
for task in supported_tasks:
normalized_task = task.replace("-with-past", "")
if normalized_task not in cls.get_all_tasks():
known_tasks = ", ".join(cls.get_all_tasks())
raise ValueError(
f'The TasksManager does not know the task called "{normalized_task}", known tasks: {known_tasks}.'
)
if not overwrite_existing and task in mapping_backend:
continue
mapping_backend[task] = make_backend_config_constructor_for_task(config_cls, task)
mapping[backend] = mapping_backend
supported_model_type_for_library[model_type] = mapping
return config_cls
return decorator
return wrapper
@staticmethod
def get_supported_tasks_for_model_type(
model_type: str, exporter: str, model_name: Optional[str] = None, library_name: Optional[str] = None
) -> TaskNameToExportConfigDict:
"""
Retrieves the `task -> exporter backend config constructors` map from the model type.
Args:
model_type (`str`):
The model type to retrieve the supported tasks for.
exporter (`str`):
The name of the exporter.
model_name (`Optional[str]`, defaults to `None`):
The name attribute of the model object, only used for the exception message.
library_name (`Optional[str]`, defaults to `None`):
The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers".
Returns:
`TaskNameToExportConfigDict`: The dictionary mapping each task to a corresponding `ExportConfig`
constructor.
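        Example (illustrative sketch; `"bert"` is only used here as a commonly supported model type):
            ```python
            >>> task_map = TasksManager.get_supported_tasks_for_model_type("bert", "onnx", library_name="transformers")
            >>> "text-classification" in task_map
            True
            ```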
"""
if library_name is None:
logger.warning(
                'Not passing the argument `library_name` to `get_supported_tasks_for_model_type` is deprecated and the support will be removed in a future version of Optimum. Please specify a `library_name`. Defaulting to `"transformers"`.'
)
# We are screwed if different dictionaries have the same keys.
supported_model_type_for_library = {
**TasksManager._DIFFUSERS_SUPPORTED_MODEL_TYPE,
**TasksManager._TIMM_SUPPORTED_MODEL_TYPE,
**TasksManager._SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE,
**TasksManager._SUPPORTED_MODEL_TYPE,
}
library_name = "transformers"
else:
supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[library_name]
model_type = model_type.lower().replace("_", "-")
model_type_and_model_name = f"{model_type} ({model_name})" if model_name else model_type
default_model_type = None
if library_name in TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG:
default_model_type = TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG[library_name]
if model_type not in supported_model_type_for_library:
if default_model_type is not None:
model_type = default_model_type
else:
raise KeyError(
f"{model_type_and_model_name} is not supported yet for {library_name}. "
f"Only {list(supported_model_type_for_library.keys())} are supported for the library {library_name}. "
f"If you want to support {model_type} please propose a PR or open up an issue."
)
if exporter not in supported_model_type_for_library[model_type]:
raise KeyError(
f"{model_type_and_model_name} is not supported yet with the {exporter} backend. "
f"Only {list(supported_model_type_for_library[model_type].keys())} are supported. "
f"If you want to support {exporter} please propose a PR or open up an issue."
)
return supported_model_type_for_library[model_type][exporter]
@staticmethod
def get_supported_model_type_for_task(task: str, exporter: str) -> List[str]:
"""
Returns the list of supported architectures by the exporter for a given task. Transformers-specific.
"""
return [
model_type.replace("-", "_")
for model_type in TasksManager._SUPPORTED_MODEL_TYPE
if task in TasksManager._SUPPORTED_MODEL_TYPE[model_type][exporter]
]
@staticmethod
def synonyms_for_task(task: str) -> Set[str]:
synonyms = [k for k, v in TasksManager._SYNONYM_TASK_MAP.items() if v == task]
synonyms += [k for k, v in TasksManager._SYNONYM_TASK_MAP.items() if v == TasksManager.map_from_synonym(task)]
synonyms = set(synonyms)
try:
synonyms.remove(task)
except KeyError:
pass
return synonyms
@staticmethod
def map_from_synonym(task: str) -> str:
if task in TasksManager._SYNONYM_TASK_MAP:
task = TasksManager._SYNONYM_TASK_MAP[task]
return task
@staticmethod
def _validate_framework_choice(framework: str):
"""
Validates if the framework requested for the export is both correct and available, otherwise throws an
exception.
"""
if framework not in ["pt", "tf"]:
raise ValueError(f"Only two frameworks are supported for export: pt or tf, but {framework} was provided.")
elif framework == "pt" and not is_torch_available():
raise RuntimeError("Cannot export model using PyTorch because no PyTorch package was found.")
elif framework == "tf" and not is_tf_available():
raise RuntimeError("Cannot export model using TensorFlow because no TensorFlow package was found.")
@staticmethod
def get_model_class_for_task(
task: str,
framework: str = "pt",
model_type: Optional[str] = None,
model_class_name: Optional[str] = None,
library: str = "transformers",
) -> Type:
"""
Attempts to retrieve an AutoModel class from a task name.
Args:
task (`str`):
The task required.
framework (`str`, defaults to `"pt"`):
The framework to use for the export.
model_type (`Optional[str]`, defaults to `None`):
The model type to retrieve the model class for. Some architectures need a custom class to be loaded,
and can not be loaded from auto class.
model_class_name (`Optional[str]`, defaults to `None`):
A model class name, allowing to override the default class that would be detected for the task. This
                parameter is useful for example for "automatic-speech-recognition", which may map to
AutoModelForSpeechSeq2Seq or to AutoModelForCTC.
library (`str`, defaults to `transformers`):
The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers".
Returns:
The AutoModel class corresponding to the task.
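        Example (illustrative sketch; assumes PyTorch and `transformers` are installed):
            ```python
            >>> model_class = TasksManager.get_model_class_for_task("text-classification", framework="pt")
            >>> model_class.__name__
            'AutoModelForSequenceClassification'
            ```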
"""
task = task.replace("-with-past", "")
task = TasksManager.map_from_synonym(task)
TasksManager._validate_framework_choice(framework)
if (framework, model_type, task) in TasksManager._CUSTOM_CLASSES:
library, class_name = TasksManager._CUSTOM_CLASSES[(framework, model_type, task)]
loaded_library = importlib.import_module(library)
return getattr(loaded_library, class_name)
else:
if framework == "pt":
tasks_to_model_loader = TasksManager._LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP[library]
else:
tasks_to_model_loader = TasksManager._LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP[library]
loaded_library = importlib.import_module(library)
if model_class_name is None:
if task not in tasks_to_model_loader:
raise KeyError(
f"Unknown task: {task}. Possible values are: "
+ ", ".join([f"`{key}` for {tasks_to_model_loader[key]}" for key in tasks_to_model_loader])
)
if isinstance(tasks_to_model_loader[task], str):
model_class_name = tasks_to_model_loader[task]
else:
                    # automatic-speech-recognition case, which may map to several auto classes
if library == "transformers":
if model_type is None:
logger.warning(
f"No model type passed for the task {task}, that may be mapped to several loading"
f" classes ({tasks_to_model_loader[task]}). Defaulting to {tasks_to_model_loader[task][0]}"
" to load the model."
)
model_class_name = tasks_to_model_loader[task][0]
else:
for autoclass_name in tasks_to_model_loader[task]:
module = getattr(loaded_library, autoclass_name)
# TODO: we must really get rid of this - and _ mess
if (
model_type in module._model_mapping._model_mapping
or model_type.replace("-", "_") in module._model_mapping._model_mapping
):
model_class_name = autoclass_name
break
if model_class_name is None:
raise ValueError(
f"Unrecognized configuration classes {tasks_to_model_loader[task]} do not match"
f" with the model type {model_type} and task {task}."
)
else:
raise NotImplementedError(
"For library other than transformers, the _TASKS_TO_MODEL_LOADER mapping should be one to one."
)
return getattr(loaded_library, model_class_name)
@staticmethod
def get_model_files(
model_name_or_path: Union[str, Path],
subfolder: str = "",
cache_dir: str = HUGGINGFACE_HUB_CACHE,
use_auth_token: Optional[str] = None,
revision: Optional[str] = None,
):
request_exception = None
full_model_path = Path(model_name_or_path) / subfolder
if full_model_path.is_dir():
all_files = [
os.path.relpath(os.path.join(dirpath, file), full_model_path)
for dirpath, _, filenames in os.walk(full_model_path)
for file in filenames
]
else:
try:
if not isinstance(model_name_or_path, str):
model_name_or_path = str(model_name_or_path)
all_files = huggingface_hub.list_repo_files(
model_name_or_path,
repo_type="model",
token=use_auth_token,
revision=revision,
)
if subfolder != "":
all_files = [file[len(subfolder) + 1 :] for file in all_files if file.startswith(subfolder)]
except (RequestsConnectionError, huggingface_hub.utils._http.OfflineModeIsEnabled) as e:
request_exception = e
object_id = model_name_or_path.replace("/", "--")
full_model_path = Path(cache_dir, f"models--{object_id}")
if full_model_path.is_dir(): # explore the cache first
# Resolve refs (for instance to convert main to the associated commit sha)
if revision is None:
revision_file = Path(full_model_path, "refs", "main")
revision = ""
if revision_file.is_file():
with open(revision_file) as f:
revision = f.read()
cached_path = Path(full_model_path, "snapshots", revision, subfolder)
all_files = [
os.path.relpath(os.path.join(dirpath, file), cached_path)
for dirpath, _, filenames in os.walk(cached_path)
for file in filenames
]
return all_files, request_exception
@staticmethod
def determine_framework(
model_name_or_path: Union[str, Path],
subfolder: str = "",
framework: Optional[str] = None,
cache_dir: str = HUGGINGFACE_HUB_CACHE,
) -> str:
"""
Determines the framework to use for the export.
The priority is in the following order:
1. User input via `framework`.
2. If local checkpoint is provided, use the same framework as the checkpoint.
3. If model repo, try to infer the framework from the cache if available, else from the Hub.
4. If could not infer, use available framework in environment, with priority given to PyTorch.
Args:
model_name_or_path (`Union[str, Path]`):
Can be either the model id of a model repo on the Hugging Face Hub, or a path to a local directory
containing a model.
subfolder (`str`, defaults to `""`):
In case the model files are located inside a subfolder of the model directory / repo on the Hugging
Face Hub, you can specify the subfolder name here.
framework (`Optional[str]`, *optional*):
The framework to use for the export. See above for priority if none provided.
Returns:
`str`: The framework to use for the export.
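        Example (illustrative sketch; `"bert-base-uncased"` is an assumed Hub model id, and the result depends on the checkpoint's weight files and the locally installed frameworks):
            ```python
            >>> TasksManager.determine_framework("bert-base-uncased")
            'pt'
            ```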
"""
if framework is not None:
return framework
all_files, request_exception = TasksManager.get_model_files(model_name_or_path, subfolder, cache_dir)
pt_weight_name = Path(WEIGHTS_NAME).stem
pt_weight_extension = Path(WEIGHTS_NAME).suffix
safe_weight_name = Path(SAFE_WEIGHTS_NAME).stem
safe_weight_extension = Path(SAFE_WEIGHTS_NAME).suffix
is_pt_weight_file = [
(file.startswith(pt_weight_name) and file.endswith(pt_weight_extension))
or (file.startswith(safe_weight_name) and file.endswith(safe_weight_extension))
for file in all_files
]
weight_name = Path(TF2_WEIGHTS_NAME).stem
weight_extension = Path(TF2_WEIGHTS_NAME).suffix
is_tf_weight_file = [file.startswith(weight_name) and file.endswith(weight_extension) for file in all_files]
if any(is_pt_weight_file):
framework = "pt"
elif any(is_tf_weight_file):
framework = "tf"
elif "model_index.json" in all_files and any(
file.endswith((pt_weight_extension, safe_weight_extension)) for file in all_files
):
# stable diffusion case
framework = "pt"
elif "config_sentence_transformers.json" in all_files:
            # The Sentence Transformers library relies on PyTorch.
framework = "pt"
else:
if request_exception is not None:
raise RequestsConnectionError(
f"The framework could not be automatically inferred. If using the command-line, please provide the argument --framework (pt,tf) Detailed error: {request_exception}"
)
else:
raise FileNotFoundError(
"Cannot determine framework from given checkpoint location."
f" There should be a {Path(WEIGHTS_NAME).stem}*{Path(WEIGHTS_NAME).suffix} for PyTorch"
f" or {Path(TF2_WEIGHTS_NAME).stem}*{Path(TF2_WEIGHTS_NAME).suffix} for TensorFlow."
)
if is_torch_available():
framework = framework or "pt"
elif is_tf_available():
framework = framework or "tf"
else:
raise EnvironmentError("Neither PyTorch nor TensorFlow found in environment. Cannot export model.")
logger.info(f"Framework not specified. Using {framework} to export the model.")
return framework
@classmethod
def _infer_task_from_model_or_model_class(
cls,
model: Optional[Union["PreTrainedModel", "TFPreTrainedModel"]] = None,
model_class: Optional[Type] = None,
) -> str:
if model is not None and model_class is not None:
raise ValueError("Either a model or a model class must be provided, but both were given here.")
if model is None and model_class is None:
raise ValueError("Either a model or a model class must be provided, but none were given here.")
target_name = model.__class__.__name__ if model is not None else model_class.__name__
task_name = None
iterable = ()
for _, model_loader in cls._LIBRARY_TO_MODEL_LOADERS_TO_TASKS_MAP.items():
iterable += (model_loader.items(),)
for _, model_loader in cls._LIBRARY_TO_TF_MODEL_LOADERS_TO_TASKS_MAP.items():
iterable += (model_loader.items(),)
pt_auto_module = importlib.import_module("transformers.models.auto.modeling_auto")
tf_auto_module = importlib.import_module("transformers.models.auto.modeling_tf_auto")
for auto_cls_name, task in itertools.chain.from_iterable(iterable):
if any(
(
target_name.startswith("Auto"),
target_name.startswith("TFAuto"),
"StableDiffusion" in target_name,
)
):
if target_name == auto_cls_name:
task_name = task
break
continue
module = tf_auto_module if auto_cls_name.startswith("TF") else pt_auto_module
# getattr(module, auto_cls_name)._model_mapping is a _LazyMapping, it also has an attribute called
# "_model_mapping" that is what we want here: class names and not actual classes.
auto_cls = getattr(module, auto_cls_name, None)
# This is the case for StableDiffusionPipeline for instance.
if auto_cls is None:
continue
model_mapping = auto_cls._model_mapping._model_mapping
if target_name in model_mapping.values():
task_name = task
break
if task_name is None:
raise ValueError(f"Could not infer the task name for {target_name}.")
return task_name
@classmethod
def _infer_task_from_model_name_or_path(
cls, model_name_or_path: str, subfolder: str = "", revision: Optional[str] = None
) -> str:
inferred_task_name = None
is_local = os.path.isdir(os.path.join(model_name_or_path, subfolder))
if is_local:
# TODO: maybe implement that.
raise RuntimeError(
f"Cannot infer the task from a local directory yet, please specify the task manually ({', '.join(TasksManager.get_all_tasks())})."
)
else:
if subfolder != "":
raise RuntimeError(
"Cannot infer the task from a model repo with a subfolder yet, please specify the task manually."
)
try:
model_info = huggingface_hub.model_info(model_name_or_path, revision=revision)
except (RequestsConnectionError, huggingface_hub.utils._http.OfflineModeIsEnabled):
raise RuntimeError(
f"Hugging Face Hub is not reachable and we cannot infer the task from a cached model. Make sure you are not offline, or otherwise please specify the `task` (or `--task` in command-line) argument ({', '.join(TasksManager.get_all_tasks())})."
)
library_name = TasksManager.infer_library_from_model(model_name_or_path, subfolder, revision)
if library_name == "diffusers":
if model_info.config["diffusers"].get("class_name", None):
class_name = model_info.config["diffusers"]["class_name"]
elif model_info.config["diffusers"].get("_class_name", None):
class_name = model_info.config["diffusers"]["_class_name"]
else:
raise ValueError(
f"Could not automatically infer the class name for {model_name_or_path}. Please open an issue at https://github.com/huggingface/optimum/issues."
)
inferred_task_name = "stable-diffusion-xl" if "StableDiffusionXL" in class_name else "stable-diffusion"
elif library_name == "timm":
inferred_task_name = "image-classification"
else:
pipeline_tag = getattr(model_info, "pipeline_tag", None)
# The Hub task "conversational" is not a supported task per se, just an alias that may map to
                # text-generation or text2text-generation.
# The Hub task "object-detection" is not a supported task per se, as in Transformers this may map to either
# zero-shot-object-detection or object-detection.
if pipeline_tag is not None and pipeline_tag not in ["conversational", "object-detection"]:
inferred_task_name = TasksManager.map_from_synonym(model_info.pipeline_tag)
else:
transformers_info = model_info.transformersInfo
if transformers_info is not None and transformers_info.get("pipeline_tag") is not None:
inferred_task_name = TasksManager.map_from_synonym(transformers_info["pipeline_tag"])
else:
# transformersInfo does not always have a pipeline_tag attribute
class_name_prefix = ""
if is_torch_available():
tasks_to_automodels = TasksManager._LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP[library_name]
else:
tasks_to_automodels = TasksManager._LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP[library_name]
class_name_prefix = "TF"
auto_model_class_name = transformers_info["auto_model"]
if not auto_model_class_name.startswith("TF"):
auto_model_class_name = f"{class_name_prefix}{auto_model_class_name}"
for task_name, class_name_for_task in tasks_to_automodels.items():
if class_name_for_task == auto_model_class_name:
inferred_task_name = task_name
break
if inferred_task_name is None:
raise KeyError(f"Could not find the proper task name for {auto_model_class_name}.")
return inferred_task_name
@classmethod
def infer_task_from_model(
cls,
model: Union[str, "PreTrainedModel", "TFPreTrainedModel", Type],
subfolder: str = "",
revision: Optional[str] = None,
) -> str:
"""
Infers the task from the model repo.
Args:
            model (`Union[str, PreTrainedModel, TFPreTrainedModel, Type]`):
The model to infer the task from. This can either be the name of a repo on the HuggingFace Hub, an
instance of a model, or a model class.
subfolder (`str`, *optional*, defaults to `""`):
In case the model files are located inside a subfolder of the model directory / repo on the Hugging
Face Hub, you can specify the subfolder name here.
revision (`Optional[str]`, defaults to `None`):
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id.
Returns:
`str`: The task name automatically detected from the model repo.
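        Example (illustrative sketch; the model id is an assumption and the call requires access to the Hugging Face Hub):
            ```python
            >>> TasksManager.infer_task_from_model("distilbert-base-uncased-finetuned-sst-2-english")
            'text-classification'
            ```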
"""
is_torch_pretrained_model = is_torch_available() and isinstance(model, PreTrainedModel)
is_tf_pretrained_model = is_tf_available() and isinstance(model, TFPreTrainedModel)
task = None
if isinstance(model, str):
task = cls._infer_task_from_model_name_or_path(model, subfolder=subfolder, revision=revision)
elif is_torch_pretrained_model or is_tf_pretrained_model:
task = cls._infer_task_from_model_or_model_class(model=model)
elif inspect.isclass(model):
task = cls._infer_task_from_model_or_model_class(model_class=model)
if task is None:
raise ValueError(f"Could not infer the task from {model}.")
return task
@staticmethod
def _infer_library_from_model(
model: Union["PreTrainedModel", "TFPreTrainedModel"], library_name: Optional[str] = None
):
if library_name is not None:
return library_name
# SentenceTransformer models have no config attributes
if hasattr(model, "_model_config"):
library_name = "sentence_transformers"
elif (
hasattr(model, "pretrained_cfg")
or hasattr(model.config, "pretrained_cfg")
or hasattr(model.config, "architecture")
):
library_name = "timm"
elif hasattr(model.config, "_diffusers_version") or getattr(model, "config_name", "") == "model_index.json":
library_name = "diffusers"
else:
library_name = "transformers"
return library_name
@classmethod
def infer_library_from_model(
cls,
model_name_or_path: Union[str, Path],
subfolder: str = "",
revision: Optional[str] = None,
cache_dir: str = HUGGINGFACE_HUB_CACHE,
library_name: Optional[str] = None,
use_auth_token: Optional[str] = None,
):
"""
Infers the library from the model repo.
Args:
model_name_or_path (`str`):
                The model to infer the library from. This can either be the name of a repo on the Hugging Face Hub, or
                a path to a local directory containing a model.
subfolder (`str`, defaults to `""`):
In case the model files are located inside a subfolder of the model directory / repo on the Hugging
Face Hub, you can specify the subfolder name here.
revision (`Optional[str]`, *optional*, defaults to `None`):
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id.
cache_dir (`Optional[str]`, *optional*):
Path to a directory in which a downloaded pretrained model weights have been cached if the standard cache should not be used.
library_name (`Optional[str]`, *optional*):
The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers".
use_auth_token (`Optional[str]`, defaults to `None`):
The token to use as HTTP bearer authorization for remote files.
Returns:
`str`: The library name automatically detected from the model repo.
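        Example (illustrative sketch; the model id is an assumption and the call requires Hub access or a populated local cache):
            ```python
            >>> TasksManager.infer_library_from_model("bert-base-uncased")
            'transformers'
            ```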
"""
if library_name is not None:
return library_name
all_files, _ = TasksManager.get_model_files(
model_name_or_path, subfolder, cache_dir, use_auth_token=use_auth_token
)
if "model_index.json" in all_files:
library_name = "diffusers"
elif (
any(file_path.startswith("sentence_") for file_path in all_files)
or "config_sentence_transformers.json" in all_files
):
library_name = "sentence_transformers"
elif CONFIG_NAME in all_files:
# We do not use PretrainedConfig.from_pretrained which has unwanted warnings about model type.
kwargs = {
"subfolder": subfolder,
"revision": revision,
"cache_dir": cache_dir,
"use_auth_token": use_auth_token,
}
config_dict, kwargs = PretrainedConfig.get_config_dict(model_name_or_path, **kwargs)
model_config = PretrainedConfig.from_dict(config_dict, **kwargs)
if hasattr(model_config, "pretrained_cfg") or hasattr(model_config, "architecture"):
library_name = "timm"
elif hasattr(model_config, "_diffusers_version"):
library_name = "diffusers"
else:
library_name = "transformers"
else:
library_name = "transformers"
if library_name is None:
raise ValueError(
"The library name could not be automatically inferred. If using the command-line, please provide the argument --library {transformers,diffusers,timm,sentence_transformers}. Example: `--library diffusers`."
)
return library_name
@classmethod
def standardize_model_attributes(
cls,
model: Union["PreTrainedModel", "TFPreTrainedModel"],
library_name: Optional[str] = None,
):
"""
        Updates the model for export. This function makes the changes required for models coming from different
        libraries to follow the Transformers style.
        Args:
            model (`Union[PreTrainedModel, TFPreTrainedModel]`):
                The instance of the model.
            library_name (`Optional[str]`, *optional*):
                The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers".
"""
library_name = TasksManager._infer_library_from_model(model, library_name)
if library_name == "diffusers":
model.config.export_model_type = "stable-diffusion"
elif library_name == "timm":
# Retrieve model config
model_config = PretrainedConfig.from_dict(model.pretrained_cfg)
# Set config as in transformers
setattr(model, "config", model_config)
# `model_type` is a class attribute in Transformers, let's avoid modifying it.
model.config.export_model_type = model.pretrained_cfg["architecture"]
elif library_name == "sentence_transformers":
if "Transformer" in model[0].__class__.__name__:
model.config = model[0].auto_model.config
model.config.export_model_type = "transformer"
elif "CLIP" in model[0].__class__.__name__:
model.config = model[0].model.config
model.config.export_model_type = "clip"
else:
raise ValueError(
f"The export of a sentence_transformers model with the first module being {model[0].__class__.__name__} is currently not supported in Optimum. Please open an issue or submit a PR to add the support."
)
@staticmethod
def get_all_tasks():
"""
Retrieves all the possible tasks.
Returns:
`List`: all the possible tasks.
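        Example (illustrative sketch):
            ```python
            >>> tasks = TasksManager.get_all_tasks()
            >>> "text-classification" in tasks
            True
            ```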
"""
tasks = []
if is_torch_available():
mapping = TasksManager._LIBRARY_TO_TASKS_TO_MODEL_LOADER_MAP
else:
mapping = TasksManager._LIBRARY_TO_TF_TASKS_TO_MODEL_LOADER_MAP
tasks = []
for d in mapping.values():
tasks += list(d.keys())
tasks = list(set(tasks))
return tasks
@staticmethod
def get_model_from_task(
task: str,
model_name_or_path: Union[str, Path],
subfolder: str = "",
revision: Optional[str] = None,
framework: Optional[str] = None,
cache_dir: str = HUGGINGFACE_HUB_CACHE,
torch_dtype: Optional["torch.dtype"] = None,
device: Optional[Union["torch.device", str]] = None,
library_name: str = None,
**model_kwargs,
) -> Union["PreTrainedModel", "TFPreTrainedModel"]:
"""
Retrieves a model from its name and the task to be enabled.
Args:
task (`str`):
The task required.
model_name_or_path (`Union[str, Path]`):
Can be either the model id of a model repo on the Hugging Face Hub, or a path to a local directory
containing a model.
subfolder (`str`, defaults to `""`):
In case the model files are located inside a subfolder of the model directory / repo on the Hugging
Face Hub, you can specify the subfolder name here.
revision (`Optional[str]`, *optional*):
Revision is the specific model version to use. It can be a branch name, a tag name, or a commit id.
framework (`Optional[str]`, *optional*):
The framework to use for the export. See `TasksManager.determine_framework` for the priority should
none be provided.
cache_dir (`Optional[str]`, *optional*):
Path to a directory in which a downloaded pretrained model weights have been cached if the standard cache should not be used.
torch_dtype (`Optional[torch.dtype]`, defaults to `None`):
Data type to load the model on. PyTorch-only argument.
device (`Optional[torch.device]`, defaults to `None`):
Device to initialize the model on. PyTorch-only argument. For PyTorch, defaults to "cpu".
model_kwargs (`Dict[str, Any]`, *optional*):
Keyword arguments to pass to the model `.from_pretrained()` method.
library_name (`Optional[str]`, defaults to `None`):
The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers". See `TasksManager.infer_library_from_model` for the priority should
none be provided.
Returns:
The instance of the model.
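        Example (illustrative sketch; the model id is an assumption and the call downloads the checkpoint):
            ```python
            >>> model = TasksManager.get_model_from_task("text-classification", "distilbert-base-uncased-finetuned-sst-2-english")
            >>> type(model).__name__
            'DistilBertForSequenceClassification'
            ```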
"""
framework = TasksManager.determine_framework(model_name_or_path, subfolder=subfolder, framework=framework)
original_task = task
if task == "auto":
task = TasksManager.infer_task_from_model(model_name_or_path, subfolder=subfolder, revision=revision)
library_name = TasksManager.infer_library_from_model(
model_name_or_path, subfolder, revision, cache_dir, library_name
)
model_type = None
model_class_name = None
kwargs = {"subfolder": subfolder, "revision": revision, "cache_dir": cache_dir, **model_kwargs}
if library_name == "transformers":
config = AutoConfig.from_pretrained(model_name_or_path, **kwargs)
model_type = config.model_type.replace("_", "-")
# TODO: if automatic-speech-recognition is passed as task, it may map to several
# different auto class (AutoModelForSpeechSeq2Seq or AutoModelForCTC),
# depending on the model type
# if original_task in ["auto", "automatic-speech-recognition"]:
if original_task == "automatic-speech-recognition" or task == "automatic-speech-recognition":
if original_task == "auto" and config.architectures is not None:
model_class_name = config.architectures[0]
model_class = TasksManager.get_model_class_for_task(
task, framework, model_type=model_type, model_class_name=model_class_name, library=library_name
)
if library_name == "timm":
model = model_class(f"hf_hub:{model_name_or_path}", pretrained=True, exportable=True)
model = model.to(torch_dtype).to(device)
elif library_name == "sentence_transformers":
cache_folder = model_kwargs.pop("cache_folder", None)
use_auth_token = model_kwargs.pop("use_auth_token", None)
trust_remote_code = model_kwargs.pop("trust_remote_code", False)
model = model_class(
model_name_or_path,
device=device,
cache_folder=cache_folder,
use_auth_token=use_auth_token,
trust_remote_code=trust_remote_code,
)
else:
try:
if framework == "pt":
kwargs["torch_dtype"] = torch_dtype
if isinstance(device, str):
device = torch.device(device)
elif device is None:
device = torch.device("cpu")
# TODO : fix EulerDiscreteScheduler loading to enable for SD models
if version.parse(torch.__version__) >= version.parse("2.0") and library_name != "diffusers":
with device:
# Initialize directly in the requested device, to save allocation time. Especially useful for large
# models to initialize on cuda device.
model = model_class.from_pretrained(model_name_or_path, **kwargs)
else:
model = model_class.from_pretrained(model_name_or_path, **kwargs).to(device)
else:
model = model_class.from_pretrained(model_name_or_path, **kwargs)
except OSError:
if framework == "pt":
logger.info("Loading TensorFlow model in PyTorch before exporting.")
kwargs["from_tf"] = True
model = model_class.from_pretrained(model_name_or_path, **kwargs)
else:
logger.info("Loading PyTorch model in TensorFlow before exporting.")
kwargs["from_pt"] = True
model = model_class.from_pretrained(model_name_or_path, **kwargs)
TasksManager.standardize_model_attributes(model, library_name)
return model
@staticmethod
def get_exporter_config_constructor(
exporter: str,
model: Optional[Union["PreTrainedModel", "TFPreTrainedModel"]] = None,
task: str = "feature-extraction",
model_type: Optional[str] = None,
model_name: Optional[str] = None,
exporter_config_kwargs: Optional[Dict[str, Any]] = None,
library_name: Optional[str] = None,
) -> ExportConfigConstructor:
"""
Gets the `ExportConfigConstructor` for a model (or alternatively for a model type) and task combination.
Args:
exporter (`str`):
The exporter to use.
model (`Optional[Union[PreTrainedModel, TFPreTrainedModel]]`, defaults to `None`):
The instance of the model.
task (`str`, defaults to `"feature-extraction"`):
The task to retrieve the config for.
model_type (`Optional[str]`, defaults to `None`):
The model type to retrieve the config for.
model_name (`Optional[str]`, defaults to `None`):
The name attribute of the model object, only used for the exception message.
exporter_config_kwargs (`Optional[Dict[str, Any]]`, defaults to `None`):
Arguments that will be passed to the exporter config class when building the config constructor.
library_name (`Optional[str]`, defaults to `None`):
The library name of the model. Can be any of "transformers", "timm", "diffusers", "sentence_transformers".
Returns:
`ExportConfigConstructor`: The `ExportConfig` constructor for the requested backend.
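        Example (illustrative sketch; `"bert"` and the `"bert-base-uncased"` config are assumptions):
            ```python
            >>> from transformers import AutoConfig
            >>> config = AutoConfig.from_pretrained("bert-base-uncased")
            >>> constructor = TasksManager.get_exporter_config_constructor("onnx", model_type="bert", task="text-classification", library_name="transformers")
            >>> onnx_config = constructor(config)
            ```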
"""
if library_name is None:
logger.warning(
"Passing the argument `library_name` to `get_supported_tasks_for_model_type` is required, but got library_name=None. Defaulting to `transformers`. An error will be raised in a future version of Optimum if `library_name` is not provided."
)
# We are screwed if different dictionaries have the same keys.
supported_model_type_for_library = {
**TasksManager._DIFFUSERS_SUPPORTED_MODEL_TYPE,
**TasksManager._TIMM_SUPPORTED_MODEL_TYPE,
**TasksManager._SENTENCE_TRANSFORMERS_SUPPORTED_MODEL_TYPE,
**TasksManager._SUPPORTED_MODEL_TYPE,
}
library_name = "transformers"
else:
supported_model_type_for_library = TasksManager._LIBRARY_TO_SUPPORTED_MODEL_TYPES[library_name]
if model is None and model_type is None:
raise ValueError("Either a model_type or model should be provided to retrieve the export config.")
if model_type is None:
if hasattr(model.config, "export_model_type"):
                # We can specify a custom `export_model_type` attribute in the config. Useful for timm, sentence_transformers.
model_type = model.config.export_model_type
else:
model_type = getattr(model.config, "model_type", None)
if model_type is None:
raise ValueError("Model type cannot be inferred. Please provide the model_type for the model!")
model_type = model_type.replace("_", "-")
model_name = getattr(model, "name", model_name)
model_tasks = TasksManager.get_supported_tasks_for_model_type(
model_type, exporter, model_name=model_name, library_name=library_name
)
if task not in model_tasks:
synonyms = TasksManager.synonyms_for_task(task)
for synonym in synonyms:
if synonym in model_tasks:
task = synonym
break
if task not in model_tasks:
raise ValueError(
f"{model_type} doesn't support task {task} for the {exporter} backend."
f" Supported tasks are: {', '.join(model_tasks.keys())}."
)
if model_type not in supported_model_type_for_library:
model_type = TasksManager._MODEL_TYPE_FOR_DEFAULT_CONFIG[library_name]
exporter_config_constructor = supported_model_type_for_library[model_type][exporter][task]
if exporter_config_kwargs is not None:
exporter_config_constructor = partial(exporter_config_constructor, **exporter_config_kwargs)
return exporter_config_constructor
| optimum/optimum/exporters/tasks.py/0 | {
"file_path": "optimum/optimum/exporters/tasks.py",
"repo_id": "optimum",
"token_count": 41994
} | 326 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from typing import Any, Dict, List, Optional
import numpy as np
import torch
from datasets import load_dataset
"""
Set of utilities for loading the most commonly used datasets (the original datasets from the GPTQ paper) and making them easy to use during quantization.
"""
def prepare_dataset(
examples: List[Dict[str, torch.LongTensor]], batch_size: int = 1, pad_token_id: Optional[int] = None
):
"""
Prepare the dataset by making sure that we have the right format and `batch_size`
Args:
examples (`List[Dict[str, torch.LongTensor]]`):
List of data to prepare
batch_size (`int`, defaults to `1`):
Batch size of the data
pad_token_id (`Optional[int]`, defaults to `None`):
Pad token id of the model
Returns:
        `List[Dict[str, torch.LongTensor]]`: Batched dataset
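    Example (minimal sketch; the token ids below are arbitrary placeholders):
        ```python
        >>> examples = [{"input_ids": [[1, 2, 3]], "attention_mask": [[1, 1, 1]]}]
        >>> batches = prepare_dataset(examples, batch_size=1)
        >>> batches[0]["input_ids"].shape
        torch.Size([1, 3])
        ```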
"""
new_examples = []
for example in examples:
input_ids = example["input_ids"]
attention_mask = example["attention_mask"]
new_examples.append(
{"input_ids": torch.LongTensor(input_ids), "attention_mask": torch.LongTensor(attention_mask)}
)
if batch_size > 1 and pad_token_id is None:
raise ValueError(
"You need to pass a `pad_token_id` in `quantize_model` if you want to have examples with batch size > 1"
)
new_examples = [
collate_data(new_examples[start : start + batch_size], contain_labels=False, pad_token_id=pad_token_id)
for start in range(0, len(new_examples), batch_size)
]
return new_examples
def collate_data(
blocks: List[Dict[str, torch.LongTensor]],
contain_labels: bool = False,
pad_token_id: Optional[int] = None,
) -> Dict[str, torch.LongTensor]:
"""
Collate data in `blocks`
Args:
blocks (`List[Dict[str, torch.LongTensor]]`):
List of tensors that we need to batch together
pad_token_id (`Optional[int]`, defaults to `None`):
Pad token id of the model
contain_labels (`bool`, defaults to `False`):
Set True to also process the labels
Returns:
`Dict[str, torch.LongTensor]`: Batched data
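    Example (minimal sketch; the tensors below are arbitrary placeholders):
        ```python
        >>> block_a = {"input_ids": torch.ones(1, 4, dtype=torch.long), "attention_mask": torch.ones(1, 4, dtype=torch.long)}
        >>> block_b = {"input_ids": torch.ones(1, 6, dtype=torch.long), "attention_mask": torch.ones(1, 6, dtype=torch.long)}
        >>> batch = collate_data([block_a, block_b], pad_token_id=0)
        >>> batch["input_ids"].shape
        torch.Size([2, 6])
        ```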
"""
def pad_block(block, pads):
return torch.cat((pads.to(block.device), block), dim=-1).long()
input_ids_blocks = [block["input_ids"] for block in blocks]
attention_mask_blocks = [block["attention_mask"] for block in blocks]
if contain_labels:
label_blocks = [block["labels"] for block in blocks]
label_max_len = max([block.size(-1) for block in label_blocks])
bsz = len(blocks)
inp_max_len = max([block.size(-1) for block in input_ids_blocks])
for i in range(bsz):
block_bsz, block_inp_len = input_ids_blocks[i].shape
pad_num = inp_max_len - block_inp_len
if pad_num > 0:
input_ids_blocks[i] = pad_block(input_ids_blocks[i], torch.ones((block_bsz, pad_num)) * pad_token_id)
attention_mask_blocks[i] = pad_block(attention_mask_blocks[i], torch.zeros((block_bsz, pad_num)))
if contain_labels:
block_label_len = label_blocks[i].shape[-1]
label_pad_num = label_max_len - block_label_len
if label_pad_num > 0:
label_blocks[i] = pad_block(label_blocks[i], torch.ones((block_bsz, label_pad_num)) * -100)
data = {
"input_ids": torch.cat(input_ids_blocks, dim=0).long(),
"attention_mask": torch.cat(attention_mask_blocks, dim=0).long(),
}
if contain_labels:
data["labels"] = torch.cat(label_blocks, dim=0).long()
return data
def get_wikitext2(tokenizer: Any, seqlen: int, nsamples: int, split: str = "train"):
if split == "train":
data = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
elif split == "validation":
data = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
# length of 288059 should be enough
text = "".join([" \n" if s == "" else s for s in data["text"][:1000]])
enc = tokenizer(text, return_tensors="pt")
dataset = []
for _ in range(nsamples):
i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = enc.input_ids[:, i:j]
attention_mask = torch.ones_like(inp)
dataset.append({"input_ids": inp, "attention_mask": attention_mask})
return dataset
def get_c4(tokenizer: Any, seqlen: int, nsamples: int, split: str = "train"):
if split == "train":
data = load_dataset("allenai/c4", split="train", data_files={"train": "en/c4-train.00000-of-01024.json.gz"})
elif split == "validation":
data = load_dataset(
"allenai/c4",
split="validation",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
)
dataset = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(data) - 1)
enc = tokenizer(data[i]["text"], return_tensors="pt")
if enc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = enc.input_ids[:, i:j]
attention_mask = torch.ones_like(inp)
dataset.append({"input_ids": inp, "attention_mask": attention_mask})
return dataset
def get_c4_new(tokenizer: Any, seqlen: int, nsamples: int, split: str = "train"):
if split == "train":
data = load_dataset("allenai/c4", split="train", data_files={"train": "en/c4-train.00000-of-01024.json.gz"})
elif split == "validation":
data = load_dataset(
"allenai/c4",
split="validation",
data_files={"validation": "en/c4-validation.00000-of-00008.json.gz"},
)
dataset = []
for _ in range(nsamples):
while True:
i = random.randint(0, len(data) - 1)
enc = tokenizer(data[i]["text"], return_tensors="pt")
if enc.input_ids.shape[1] >= seqlen:
break
i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = enc.input_ids[:, i:j]
attention_mask = torch.ones_like(inp)
dataset.append({"input_ids": inp, "attention_mask": attention_mask})
return dataset
def get_ptb(tokenizer: Any, seqlen: int, nsamples: int, split: str = "train"):
if split == "train":
data = load_dataset("ptb_text_only", "penn_treebank", split="train")
elif split == "validation":
data = load_dataset("ptb_text_only", "penn_treebank", split="validation")
enc = tokenizer(" ".join(data["sentence"]), return_tensors="pt")
dataset = []
for _ in range(nsamples):
i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = enc.input_ids[:, i:j]
attention_mask = torch.ones_like(inp)
dataset.append({"input_ids": inp, "attention_mask": attention_mask})
return dataset
def get_ptb_new(tokenizer: Any, seqlen: int, nsamples: int, split: str = "train"):
if split == "train":
data = load_dataset("ptb_text_only", "penn_treebank", split="train")
elif split == "validation":
data = load_dataset("ptb_text_only", "penn_treebank", split="test")
enc = tokenizer(" ".join(data["sentence"]), return_tensors="pt")
dataset = []
for _ in range(nsamples):
i = random.randint(0, enc.input_ids.shape[1] - seqlen - 1)
j = i + seqlen
inp = enc.input_ids[:, i:j]
attention_mask = torch.ones_like(inp)
dataset.append({"input_ids": inp, "attention_mask": attention_mask})
return dataset
def get_dataset(
dataset_name: str, tokenizer: Any, nsamples: int = 128, seqlen: int = 2048, seed: int = 0, split: str = "train"
):
"""
Get the dataset from the original paper of GPTQ
Args:
dataset_name (`str`):
            Dataset name. Available options are `['wikitext2', 'c4', 'c4-new', 'ptb', 'ptb-new']`.
tokenizer (`Any`):
Tokenizer of the model
nsamples (`int`, defaults to `128`):
Number of samples
seqlen (`int`, defaults to `2048`):
The sequence length of the model
seed (`int`, defaults to `0`):
Seed
split (`str`, defaults to `train`):
Split of the dataset. Can be either "train" or "validation"
Returns:
`List[Dict[str,torch.LongTensor]]`: The tokenized dataset.
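    Example (illustrative sketch; `"gpt2"` is an assumed tokenizer checkpoint and the call downloads the calibration data):
        ```python
        >>> from transformers import AutoTokenizer
        >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
        >>> calibration_dataset = get_dataset("c4", tokenizer, nsamples=16, seqlen=512)
        >>> len(calibration_dataset)
        16
        ```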
"""
random.seed(seed)
np.random.seed(seed)
torch.random.manual_seed(seed)
get_dataset_map = {
"wikitext2": get_wikitext2,
"c4": get_c4,
"c4-new": get_c4_new,
"ptb": get_ptb,
"ptb-new": get_ptb_new,
}
if split not in ["train", "validation"]:
raise ValueError(f"The split need to be 'train' or 'validation' but found {split}")
if dataset_name not in get_dataset_map:
raise ValueError(f"Expected a value in {list(get_dataset_map.keys())} but found {dataset_name}")
get_dataset_fn = get_dataset_map[dataset_name]
return get_dataset_fn(tokenizer=tokenizer, nsamples=nsamples, seqlen=seqlen, split=split)
| optimum/optimum/gptq/data.py/0 | {
"file_path": "optimum/optimum/gptq/data.py",
"repo_id": "optimum",
"token_count": 4243
} | 327 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from typing import TYPE_CHECKING
import numpy as np
import torch
import onnxruntime as ort
from onnxruntime.capi.onnxruntime_inference_collection import OrtValue
from onnxruntime.transformers.io_binding_helper import TypeHelper as ORTTypeHelper
from ..utils import is_cupy_available, is_onnxruntime_training_available
if TYPE_CHECKING:
from ..modeling_ort import ORTModel
if is_cupy_available():
import cupy as cp
# Adapted from https://github.com/microsoft/onnxruntime/blob/93e0a151177ad8222c2c95f814342bfa27f0a64d/onnxruntime/python/tools/transformers/io_binding_helper.py#L12
class TypeHelper(ORTTypeHelper):
"""
Gets data type information of the ONNX Runtime inference session and provides the mapping from
`OrtValue` data types to the data types of other frameworks (NumPy, PyTorch, etc).
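    Example (illustrative sketch):
        ```python
        >>> TypeHelper.ort_type_to_torch_type("tensor(float)")
        torch.float32
        ```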
"""
@staticmethod
def ort_type_to_numpy_type(ort_type: str):
ort_type_to_numpy_type_map = {
"tensor(int64)": np.int64,
"tensor(int32)": np.int32,
"tensor(int8)": np.int8,
"tensor(float)": np.float32,
"tensor(float16)": np.float16,
"tensor(bool)": bool,
}
if ort_type in ort_type_to_numpy_type_map:
return ort_type_to_numpy_type_map[ort_type]
else:
raise ValueError(
f"{ort_type} is not supported. Here is a list of supported data type: {ort_type_to_numpy_type_map.keys()}"
)
@staticmethod
def ort_type_to_torch_type(ort_type: str):
ort_type_to_torch_type_map = {
"tensor(int64)": torch.int64,
"tensor(int32)": torch.int32,
"tensor(int8)": torch.int8,
"tensor(float)": torch.float32,
"tensor(float16)": torch.float16,
"tensor(bool)": torch.bool,
}
if ort_type in ort_type_to_torch_type_map:
return ort_type_to_torch_type_map[ort_type]
else:
raise ValueError(
f"{ort_type} is not supported. Here is a list of supported data type: {ort_type_to_torch_type_map.keys()}"
)
# Adapted from https://github.com/microsoft/onnxruntime/blob/1ab11a111ce0717bfbfaca964d04a017cb9b1752/onnxruntime/python/tools/transformers/io_binding_helper.py#L97
class IOBindingHelper:
"""
    A helper class to enable `ORTModel` instances to prepare IO binding with dynamically shaped outputs for an inference
    session and to transfer tensors from ONNX Runtime to other frameworks on device. It helps reduce memory copies
    between the host and the device.
"""
def __init__(self, model: ort.InferenceSession, device, **kwargs):
self.model = model
self.device = device
# Create {name:idx} dict for model inputs and outputs
        self.model_inputs = {input_key.name: idx for idx, input_key in enumerate(model.get_inputs())}
self.model_outputs = {output_key.name: idx for idx, output_key in enumerate(model.get_outputs())}
self.model_input_names = list(self.model_inputs.keys())
self.model_output_names = list(self.model_outputs.keys())
@staticmethod
def to_pytorch(ort_value: OrtValue) -> torch.Tensor:
"""
        Converts a tensor held by an `OrtValue` in the ONNX Runtime memory buffer to a torch tensor.
"""
if is_onnxruntime_training_available():
return IOBindingHelper.to_pytorch_via_dlpack(ort_value)
else:
try:
return IOBindingHelper.to_pytorch_via_cupy(ort_value)
except Exception:
logging.error(traceback.format_exc())
logging.info("Unable to access output memory in CUDA, will offload to CPU")
return IOBindingHelper.to_pytorch_via_numpy(ort_value)
@staticmethod
def to_pytorch_via_numpy(ort_value: OrtValue) -> torch.Tensor:
ort_device = ort_value.device_name().lower()
return torch.tensor(ort_value.numpy()).to(ort_device)
@staticmethod
def to_pytorch_via_cupy(ort_value: OrtValue) -> torch.Tensor:
ort_device = ort_value.device_name().lower()
if ort_device != "cuda":
raise RuntimeError(f"Exchange tensors to PyTorch via CuPy only when device is CUDA, got: {ort_device}")
ort_type = ort_value.data_type()
numpy_type = TypeHelper.ort_type_to_numpy_type(ort_type)
# Access CUDA memory via CuPy
memory = cp.cuda.UnownedMemory(ort_value.data_ptr(), 0, None)
memory_ptr = cp.cuda.MemoryPointer(memory, 0)
cp_array = cp.ndarray(shape=ort_value.shape(), memptr=memory_ptr, dtype=numpy_type)
torch_tensor = torch.from_dlpack(cp_array.toDlpack())
        # If it is boolean, the dtype will be uint8 and needs to be converted back to bool.
if "bool" in ort_type:
torch_tensor = torch_tensor.to(torch.bool)
torch_tensor = torch_tensor.clone()
return torch_tensor
@staticmethod
# dlpack support is available for OrtValue only when `onnxruntime-training` is installed
def to_pytorch_via_dlpack(ort_value: OrtValue) -> torch.Tensor:
from torch._C import _from_dlpack
torch_tensor = _from_dlpack(ort_value.to_dlpack())
return torch_tensor
@staticmethod
def get_device_index(device):
if isinstance(device, str):
# could be 'cuda:0', 'cuda:1', or 'cpu'. with cpu, set index=0
device = torch.device(device)
elif isinstance(device, int):
return device
return 0 if device.index is None else device.index
@staticmethod
def prepare_io_binding(ort_model: "ORTModel", **inputs) -> ort.IOBinding:
"""
        Returns an IOBinding object for an inference session. This method is general-purpose; if the inputs and outputs
        are known in advance, you can prepare the data buffers directly to avoid tensor transfers across frameworks.
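        Example (minimal sketch; `ort_model` is assumed to be an `ORTModel` on a CUDA device and `inputs` a dict of torch tensors already on that device):
            ```python
            >>> io_binding = IOBindingHelper.prepare_io_binding(ort_model, **inputs)
            >>> ort_model.model.run_with_iobinding(io_binding)
            >>> outputs = io_binding.get_outputs()
            ```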
"""
if not all(input_name in inputs.keys() for input_name in ort_model.inputs_names):
raise ValueError(
f"The ONNX model takes {ort_model.inputs_names.keys()} as inputs, but only {inputs.keys()} are given."
)
name_to_np_type = TypeHelper.get_io_numpy_type_map(ort_model.model)
# Bind inputs and outputs to onnxruntime session
io_binding = ort_model.model.io_binding()
# Bind inputs
for input_name in ort_model.inputs_names:
onnx_input = inputs.pop(input_name)
onnx_input = onnx_input.contiguous()
io_binding.bind_input(
input_name,
onnx_input.device.type,
ort_model.device.index,
name_to_np_type[input_name],
list(onnx_input.size()),
onnx_input.data_ptr(),
)
# Bind outputs
for name in ort_model.output_names:
io_binding.bind_output(name, ort_model.device.type, device_id=ort_model.device.index)
return io_binding
| optimum/optimum/onnxruntime/io_binding/io_binding_helper.py/0 | {
"file_path": "optimum/optimum/onnxruntime/io_binding/io_binding_helper.py",
"repo_id": "optimum",
"token_count": 3274
} | 328 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes handling quantization with ONNX Runtime."""
import logging
import os
from collections import defaultdict
from pathlib import Path
from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Tuple, Union
import onnx
from datasets import Dataset, load_dataset
from packaging.version import Version, parse
from transformers import AutoConfig
from onnxruntime import __version__ as ort_version
from onnxruntime.quantization import CalibrationDataReader, QuantFormat, QuantizationMode, QuantType
from onnxruntime.quantization.onnx_quantizer import ONNXQuantizer
from onnxruntime.quantization.qdq_quantizer import QDQQuantizer
from ..quantization_base import OptimumQuantizer
from ..utils.save_utils import maybe_save_preprocessors
from . import ORTQuantizableOperator
from .configuration import CalibrationConfig, ORTConfig, QuantizationConfig
from .modeling_ort import ORTModel
from .modeling_seq2seq import ORTModelForConditionalGeneration
from .preprocessors import QuantizationPreprocessor
if TYPE_CHECKING:
from transformers import PretrainedConfig
LOGGER = logging.getLogger(__name__)
class ORTCalibrationDataReader(CalibrationDataReader):
__slots__ = ["batch_size", "dataset", "_dataset_iter"]
def __init__(self, dataset: Dataset, batch_size: int = 1):
if dataset is None:
raise ValueError("Provided dataset is None.")
if batch_size <= 0:
raise ValueError(f"Provided batch_size should be >= 1 (got: {batch_size}).")
self.dataset = dataset
self.batch_size = batch_size
self._dataset_iter = iter(self.dataset)
def get_next(self):
featurized_samples = None
try:
if self.batch_size == 1:
featurized_samples = {key: [value] for key, value in next(self._dataset_iter).items()}
else:
featurized_samples = defaultdict(list)
for _ in range(self.batch_size):
sample = next(self._dataset_iter)
for name, value in sample.items():
featurized_samples[name] += [value]
except StopIteration:
pass
if featurized_samples is not None and len(featurized_samples) > 0:
return featurized_samples
return None
class ORTQuantizer(OptimumQuantizer):
"""
Handles the ONNX Runtime quantization process for models shared on huggingface.co/models.
"""
def __init__(self, onnx_model_path: Path, config: Optional["PretrainedConfig"] = None):
"""
Args:
onnx_model_path (`Path`):
Path to the onnx model files you want to quantize.
config (`Optional[PretrainedConfig]`, defaults to `None`):
The configuration of the model.
"""
super().__init__()
self.onnx_model_path = onnx_model_path
self.config = config
if self.config is None:
try:
self.config = AutoConfig.from_pretrained(self.onnx_model_path.parent)
except OSError:
LOGGER.warning(
f"Could not load the config for {self.onnx_model_path} automatically, this might make "
"the quantized model harder to use because it will not be able to be loaded by an ORTModel without "
"having to specify the configuration explicitly."
)
self._calibrator = None
@classmethod
def from_pretrained(
cls,
model_or_path: Union["ORTModel", str, Path],
file_name: Optional[str] = None,
) -> "ORTQuantizer":
"""
Instantiates a `ORTQuantizer` from an ONNX model file or an `ORTModel`.
Args:
model_or_path (`Union[ORTModel, str, Path]`):
Can be either:
                    - A path to a saved exported ONNX Intermediate Representation (IR) model, e.g., `./my_model_directory/`.
- Or an `ORTModelForXX` class, e.g., `ORTModelForQuestionAnswering`.
file_name(`Optional[str]`, defaults to `None`):
Overwrites the default model file name from `"model.onnx"` to `file_name`.
This allows you to load different model files from the same repository or directory.
Returns:
An instance of `ORTQuantizer`.
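        Example (illustrative sketch; the directory path is an assumption and must contain a single ONNX file):
            ```python
            >>> quantizer = ORTQuantizer.from_pretrained("path/to/onnx_model_directory")
            ```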
"""
ort_quantizer_error_message = "ORTQuantizer does not support multi-file quantization. Please create separate ORTQuantizer instances for each model/file, by passing the argument `file_name` to ORTQuantizer.from_pretrained()."
if isinstance(model_or_path, str):
model_or_path = Path(model_or_path)
path = None
if isinstance(model_or_path, ORTModelForConditionalGeneration):
raise NotImplementedError(ort_quantizer_error_message)
elif isinstance(model_or_path, Path) and file_name is None:
onnx_files = list(model_or_path.glob("*.onnx"))
if len(onnx_files) == 0:
raise FileNotFoundError(f"Could not find any ONNX model file in {model_or_path}")
elif len(onnx_files) > 1:
raise RuntimeError(
f"Found too many ONNX model files in {model_or_path}. {ort_quantizer_error_message}"
)
file_name = onnx_files[0].name
if isinstance(model_or_path, ORTModel):
if path is None:
path = Path(model_or_path.model._model_path)
elif os.path.isdir(model_or_path):
path = Path(model_or_path) / file_name
else:
raise ValueError(f"Unable to load model from {model_or_path}.")
return cls(path)
def fit(
self,
dataset: Dataset,
calibration_config: CalibrationConfig,
onnx_augmented_model_name: Union[str, Path] = "augmented_model.onnx",
operators_to_quantize: Optional[List[str]] = None,
batch_size: int = 1,
use_external_data_format: bool = False,
use_gpu: bool = False,
force_symmetric_range: bool = False,
) -> Dict[str, Tuple[float, float]]:
"""
Performs the calibration step and computes the quantization ranges.
Args:
dataset (`Dataset`):
The dataset to use when performing the calibration step.
calibration_config ([`~CalibrationConfig`]):
The configuration containing the parameters related to the calibration step.
onnx_augmented_model_name (`Union[str, Path]`, defaults to `"augmented_model.onnx"`):
The path used to save the augmented model used to collect the quantization ranges.
operators_to_quantize (`Optional[List[str]]`, defaults to `None`):
List of the operators types to quantize.
batch_size (`int`, defaults to 1):
The batch size to use when collecting the quantization ranges values.
use_external_data_format (`bool`, defaults to `False`):
                Whether to use the external data format to store models whose size is >= 2Gb.
use_gpu (`bool`, defaults to `False`):
Whether to use the GPU when collecting the quantization ranges values.
force_symmetric_range (`bool`, defaults to `False`):
Whether to make the quantization ranges symmetric.
Returns:
            The dictionary mapping the node names to their quantization ranges.
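        Example (minimal sketch; assumes `quantizer`, a tokenized calibration `dataset` and a `calibration_config` built with `AutoCalibrationConfig` already exist):
            ```python
            >>> calibration_ranges = quantizer.fit(dataset, calibration_config)
            ```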
"""
# If a dataset is provided, then we are in a static quantization mode
LOGGER.info(
f"Using static quantization schema ("
f"dataset: {calibration_config.dataset_name}, method: {calibration_config.method}"
f")"
)
self.partial_fit(
dataset,
calibration_config,
onnx_augmented_model_name,
operators_to_quantize,
batch_size,
use_external_data_format,
use_gpu,
force_symmetric_range,
)
return self.compute_ranges()
def partial_fit(
self,
dataset: Dataset,
calibration_config: CalibrationConfig,
onnx_augmented_model_name: Union[str, Path] = "augmented_model.onnx",
operators_to_quantize: Optional[List[str]] = None,
batch_size: int = 1,
use_external_data_format: bool = False,
use_gpu: bool = False,
force_symmetric_range: bool = False,
):
"""
Performs the calibration step and collects the quantization ranges without computing them.
Args:
dataset (`Dataset`):
The dataset to use when performing the calibration step.
calibration_config (`CalibrationConfig`):
The configuration containing the parameters related to the calibration step.
onnx_augmented_model_name (`Union[str, Path]`, defaults to `"augmented_model.onnx"`):
The path used to save the augmented model used to collect the quantization ranges.
operators_to_quantize (`Optional[List[str]]`, defaults to `None`):
List of the operators types to quantize.
batch_size (`int`, defaults to 1):
The batch size to use when collecting the quantization ranges values.
use_external_data_format (`bool`, defaults to `False`):
                Whether to use the external data format to store models whose size is >= 2GB.
use_gpu (`bool`, defaults to `False`):
Whether to use the GPU when collecting the quantization ranges values.
force_symmetric_range (`bool`, defaults to `False`):
Whether to make the quantization ranges symmetric.
"""
# If no calibrator, then create one
if calibration_config.method is not None:
LOGGER.info(f"Creating calibrator: {calibration_config.method}({calibration_config})")
self._calibrator = calibration_config.create_calibrator(
onnx_model_path=self.onnx_model_path.as_posix(),
use_external_data_format=use_external_data_format,
augmented_model_name=onnx_augmented_model_name,
operators_to_quantize=operators_to_quantize,
force_symmetric_range=force_symmetric_range,
)
if use_gpu:
self._calibrator.set_execution_providers(execution_providers=["CUDAExecutionProvider"])
LOGGER.info("Collecting tensors statistics...")
reader = ORTCalibrationDataReader(dataset, batch_size)
self._calibrator.collect_data(reader)
def compute_ranges(self) -> Dict[str, Tuple[float, float]]:
"""
Computes the quantization ranges.
Returns:
The dictionary mapping the nodes name to their quantization ranges.
"""
if self._calibrator is None:
raise ValueError(
"Calibrator is None, please call `partial_fit` or `fit` method at least ones to compute ranges."
)
LOGGER.info("Computing calibration ranges")
if parse(ort_version) >= Version("1.16.0"):
return self._calibrator.compute_data()
return self._calibrator.compute_range()
def quantize(
self,
quantization_config: QuantizationConfig,
save_dir: Union[str, Path],
file_suffix: Optional[str] = "quantized",
calibration_tensors_range: Optional[Dict[str, Tuple[float, float]]] = None,
use_external_data_format: bool = False,
preprocessor: Optional[QuantizationPreprocessor] = None,
) -> Path:
"""
Quantizes a model given the optimization specifications defined in `quantization_config`.
Args:
quantization_config (`QuantizationConfig`):
The configuration containing the parameters related to quantization.
save_dir (`Union[str, Path]`):
The directory where the quantized model should be saved.
file_suffix (`Optional[str]`, defaults to `"quantized"`):
The file_suffix used to save the quantized model.
calibration_tensors_range (`Optional[Dict[str, Tuple[float, float]]]`, defaults to `None`):
The dictionary mapping the nodes name to their quantization ranges, used and required only when applying static quantization.
use_external_data_format (`bool`, defaults to `False`):
                Whether to use the external data format to store models whose size is >= 2GB.
preprocessor (`Optional[QuantizationPreprocessor]`, defaults to `None`):
The preprocessor to use to collect the nodes to include or exclude from quantization.
Returns:
The path of the resulting quantized model.
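        Example (a minimal sketch of dynamic quantization; the configuration and paths are illustrative):
        ```python
        >>> from optimum.onnxruntime import AutoQuantizationConfig, ORTQuantizer
        >>> quantizer = ORTQuantizer.from_pretrained("./my_onnx_model_directory")
        >>> qconfig = AutoQuantizationConfig.avx512(is_static=False, per_channel=True)
        >>> quantized_model_path = quantizer.quantize(save_dir="./my_quantized_model", quantization_config=qconfig)
        ```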
"""
use_qdq = quantization_config.is_static and quantization_config.format == QuantFormat.QDQ
save_dir = Path(save_dir)
save_dir.mkdir(parents=True, exist_ok=True)
if quantization_config.is_static and calibration_tensors_range is None:
raise ValueError(
"Requested static quantization in the QuantizationConfig, but no calibration ranges were provided. Please run calibration first using the quantizer fit method, or use dynamic quantization."
)
if not quantization_config.is_static:
if quantization_config.mode != QuantizationMode.IntegerOps:
LOGGER.warning(
f"ONNX Runtime dynamic quantization mode should be QuantizationMode.IntegerOps "
f"(got: {quantization_config.mode})."
)
if quantization_config.activations_dtype != QuantType.QUInt8:
LOGGER.warning(
f"ONNX Runtime dynamic quantization activations data type should be QuantType.QUInt8 "
f"(got: {quantization_config.activations_dtype})."
)
LOGGER.info(
f"Creating {'static' if quantization_config.is_static else 'dynamic'} quantizer: {quantization_config}"
)
if preprocessor is not None:
LOGGER.info("Preprocessor detected, collecting nodes to include/exclude")
nodes_to_quantize, nodes_to_exclude = preprocessor.collect(self.onnx_model_path)
nodes_to_quantize.update(quantization_config.nodes_to_quantize)
nodes_to_exclude.update(quantization_config.nodes_to_exclude)
quantization_config.nodes_to_quantize = list(nodes_to_quantize)
quantization_config.nodes_to_exclude = list(nodes_to_exclude)
has_subgraphs = False
onnx_model = onnx.load(Path(self.onnx_model_path).as_posix())
for node in onnx_model.graph.node:
if node.op_type in ["If", "Loop", "Scan", "SequenceMap"]:
has_subgraphs = True
break
if has_subgraphs:
if quantization_config.is_static:
raise NotImplementedError("Static quantization is currently not supported for models with subgraphs.")
if parse(ort_version) == Version("1.16.0"):
raise ValueError(
"ONNX Runtime version v1.16.0 is not compatible with quantization for models with subgraphs, please downgrade to 1.15.1 or upgrade to a higher version. Reference: https://github.com/microsoft/onnxruntime/pull/17651"
)
quantizer_factory = QDQQuantizer if use_qdq else ONNXQuantizer
if parse(ort_version) >= Version("1.13.0"):
# The argument `input_qType` has been changed into `activation_qType` from ORT 1.13
quantizer = quantizer_factory(
model=onnx_model,
static=quantization_config.is_static,
per_channel=quantization_config.per_channel,
mode=quantization_config.mode,
weight_qType=quantization_config.weights_dtype,
activation_qType=quantization_config.activations_dtype,
tensors_range=calibration_tensors_range,
reduce_range=quantization_config.reduce_range,
nodes_to_quantize=quantization_config.nodes_to_quantize,
nodes_to_exclude=quantization_config.nodes_to_exclude,
op_types_to_quantize=[
operator.value if isinstance(operator, ORTQuantizableOperator) else operator
for operator in quantization_config.operators_to_quantize
],
extra_options={
"WeightSymmetric": quantization_config.weights_symmetric,
"ActivationSymmetric": quantization_config.activations_symmetric,
"EnableSubgraph": has_subgraphs,
"ForceSymmetric": quantization_config.activations_symmetric
and quantization_config.weights_symmetric,
"AddQDQPairToWeight": quantization_config.qdq_add_pair_to_weight,
"DedicatedQDQPair": quantization_config.qdq_dedicated_pair,
"QDQOpTypePerChannelSupportToAxis": quantization_config.qdq_op_type_per_channel_support_to_axis,
},
)
else:
quantizer = quantizer_factory(
model=onnx_model,
static=quantization_config.is_static,
per_channel=quantization_config.per_channel,
mode=quantization_config.mode,
weight_qType=quantization_config.weights_dtype,
input_qType=quantization_config.activations_dtype,
tensors_range=calibration_tensors_range,
reduce_range=quantization_config.reduce_range,
nodes_to_quantize=quantization_config.nodes_to_quantize,
nodes_to_exclude=quantization_config.nodes_to_exclude,
op_types_to_quantize=[
operator.value if isinstance(operator, ORTQuantizableOperator) else operator
for operator in quantization_config.operators_to_quantize
],
extra_options={
"WeightSymmetric": quantization_config.weights_symmetric,
"ActivationSymmetric": quantization_config.activations_symmetric,
"EnableSubgraph": False,
"ForceSymmetric": quantization_config.activations_symmetric
and quantization_config.weights_symmetric,
"AddQDQPairToWeight": quantization_config.qdq_add_pair_to_weight,
"DedicatedQDQPair": quantization_config.qdq_dedicated_pair,
"QDQOpTypePerChannelSupportToAxis": quantization_config.qdq_op_type_per_channel_support_to_axis,
},
)
LOGGER.info("Quantizing model...")
quantizer.quantize_model()
suffix = f"_{file_suffix}" if file_suffix else ""
quantized_model_path = save_dir.joinpath(f"{self.onnx_model_path.stem}{suffix}").with_suffix(".onnx")
LOGGER.info(f"Saving quantized model at: {save_dir} (external data format: " f"{use_external_data_format})")
quantizer.model.save_model_to_file(quantized_model_path.as_posix(), use_external_data_format)
# Create and save the configuration summarizing all the parameters related to quantization
ort_config = ORTConfig(quantization=quantization_config, use_external_data_format=use_external_data_format)
ort_config.save_pretrained(save_dir)
if self.config is not None:
self.config.save_pretrained(save_dir)
maybe_save_preprocessors(self.onnx_model_path.parent, save_dir)
return Path(save_dir)
def get_calibration_dataset(
self,
dataset_name: str,
num_samples: int = 100,
dataset_config_name: Optional[str] = None,
dataset_split: Optional[str] = None,
preprocess_function: Optional[Callable] = None,
preprocess_batch: bool = True,
seed: int = 2016,
use_auth_token: bool = False,
) -> Dataset:
"""
Creates the calibration `datasets.Dataset` to use for the post-training static quantization calibration step.
Args:
dataset_name (`str`):
The dataset repository name on the Hugging Face Hub or path to a local directory containing data files
to load to use for the calibration step.
num_samples (`int`, defaults to 100):
The maximum number of samples composing the calibration dataset.
dataset_config_name (`Optional[str]`, defaults to `None`):
The name of the dataset configuration.
dataset_split (`Optional[str]`, defaults to `None`):
Which split of the dataset to use to perform the calibration step.
preprocess_function (`Optional[Callable]`, defaults to `None`):
Processing function to apply to each example after loading dataset.
preprocess_batch (`bool`, defaults to `True`):
Whether the `preprocess_function` should be batched.
seed (`int`, defaults to 2016):
The random seed to use when shuffling the calibration dataset.
use_auth_token (`bool`, defaults to `False`):
Whether to use the token generated when running `transformers-cli login` (necessary for some datasets
like ImageNet).
Returns:
The calibration `datasets.Dataset` to use for the post-training static quantization calibration
step.
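        Example (a sketch of the calibration workflow; it assumes `preprocess_function` and `tokenizer` have been
        defined beforehand to tokenize the examples):
        ```python
        >>> from functools import partial
        >>> from optimum.onnxruntime import AutoCalibrationConfig
        >>> calibration_dataset = quantizer.get_calibration_dataset(
        ...     "glue",
        ...     dataset_config_name="sst2",
        ...     preprocess_function=partial(preprocess_function, tokenizer=tokenizer),
        ...     num_samples=50,
        ...     dataset_split="train",
        ... )
        >>> calibration_config = AutoCalibrationConfig.minmax(calibration_dataset)
        >>> ranges = quantizer.fit(dataset=calibration_dataset, calibration_config=calibration_config)
        ```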
"""
if dataset_name is None:
raise ValueError(
"ORTQuantizer: Static quantization calibration step requires a dataset_name if no calib_dataset is "
"provided."
)
calib_dataset = load_dataset(
dataset_name,
name=dataset_config_name,
split=dataset_split,
use_auth_token=use_auth_token,
)
if num_samples is not None:
num_samples = min(num_samples, len(calib_dataset))
calib_dataset = calib_dataset.shuffle(seed=seed).select(range(num_samples))
if preprocess_function is not None:
processed_calib_dataset = calib_dataset.map(preprocess_function, batched=preprocess_batch)
else:
processed_calib_dataset = calib_dataset
return self.clean_calibration_dataset(processed_calib_dataset)
def clean_calibration_dataset(self, dataset: Dataset) -> Dataset:
model = onnx.load(self.onnx_model_path)
model_inputs = {input.name for input in model.graph.input}
ignored_columns = list(set(dataset.column_names) - model_inputs)
return dataset.remove_columns(ignored_columns)
| optimum/optimum/onnxruntime/quantization.py/0 | {
"file_path": "optimum/optimum/onnxruntime/quantization.py",
"repo_id": "optimum",
"token_count": 10189
} | 329 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from diffusers import ConfigMixin
from diffusers.image_processor import VaeImageProcessor as DiffusersVaeImageProcessor
from diffusers.utils.pil_utils import PIL_INTERPOLATION
from PIL import Image
from tqdm.auto import tqdm
class DiffusionPipelineMixin(ConfigMixin):
# Copied from https://github.com/huggingface/diffusers/blob/v0.12.1/src/diffusers/pipelines/pipeline_utils.py#L812
@staticmethod
def numpy_to_pil(images):
"""
Converts a numpy image or a batch of images to a PIL image.
"""
if images.ndim == 3:
images = images[None, ...]
images = (images * 255).round().astype("uint8")
if images.shape[-1] == 1:
# special case for grayscale (single channel) images
pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
else:
pil_images = [Image.fromarray(image) for image in images]
return pil_images
# Copied from https://github.com/huggingface/diffusers/blob/v0.12.1/src/diffusers/pipelines/pipeline_utils.py#L827
def progress_bar(self, iterable=None, total=None):
if not hasattr(self, "_progress_bar_config"):
self._progress_bar_config = {}
elif not isinstance(self._progress_bar_config, dict):
raise ValueError(
f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
)
if iterable is not None:
return tqdm(iterable, **self._progress_bar_config)
elif total is not None:
return tqdm(total=total, **self._progress_bar_config)
else:
raise ValueError("Either `total` or `iterable` has to be defined.")
# Adapted from https://github.com/huggingface/diffusers/blob/v0.18.1/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion.py#L58
def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
"""
Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
"""
std_text = np.std(noise_pred_text, axis=tuple(range(1, noise_pred_text.ndim)), keepdims=True)
std_cfg = np.std(noise_cfg, axis=tuple(range(1, noise_cfg.ndim)), keepdims=True)
# rescale the results from guidance (fixes overexposure)
noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
# mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
return noise_cfg
class VaeImageProcessor(DiffusersVaeImageProcessor):
# Adapted from diffusers.VaeImageProcessor.denormalize
@staticmethod
def denormalize(images: np.ndarray):
"""
Denormalize an image array to [0,1].
"""
return np.clip(images / 2 + 0.5, 0, 1)
# Adapted from diffusers.VaeImageProcessor.preprocess
def preprocess(
self,
image: Union[torch.FloatTensor, PIL.Image.Image, np.ndarray],
height: Optional[int] = None,
width: Optional[int] = None,
) -> np.ndarray:
"""
Preprocess the image input. Accepted formats are PIL images, NumPy arrays or PyTorch tensors.
"""
supported_formats = (PIL.Image.Image, np.ndarray, torch.Tensor)
do_convert_grayscale = getattr(self.config, "do_convert_grayscale", False)
# Expand the missing dimension for 3-dimensional pytorch tensor or numpy array that represents grayscale image
if do_convert_grayscale and isinstance(image, (torch.Tensor, np.ndarray)) and image.ndim == 3:
if isinstance(image, torch.Tensor):
# if image is a pytorch tensor could have 2 possible shapes:
# 1. batch x height x width: we should insert the channel dimension at position 1
                # 2. channel x height x width: we should insert the batch dimension at position 0,
                #    however, since both the channel and batch dimensions have size 1, it is equivalent to insert at position 1
# for simplicity, we insert a dimension of size 1 at position 1 for both cases
image = image.unsqueeze(1)
else:
# if it is a numpy array, it could have 2 possible shapes:
# 1. batch x height x width: insert channel dimension on last position
# 2. height x width x channel: insert batch dimension on first position
if image.shape[-1] == 1:
image = np.expand_dims(image, axis=0)
else:
image = np.expand_dims(image, axis=-1)
if isinstance(image, supported_formats):
image = [image]
elif not (isinstance(image, list) and all(isinstance(i, supported_formats) for i in image)):
raise ValueError(
f"Input is in incorrect format: {[type(i) for i in image]}. Currently, we only support {', '.join(supported_formats)}"
)
if isinstance(image[0], PIL.Image.Image):
if self.config.do_convert_rgb:
image = [self.convert_to_rgb(i) for i in image]
elif do_convert_grayscale:
image = [self.convert_to_grayscale(i) for i in image]
if self.config.do_resize:
height, width = self.get_height_width(image[0], height, width)
image = [self.resize(i, height, width) for i in image]
image = self.reshape(self.pil_to_numpy(image))
else:
if isinstance(image[0], torch.Tensor):
image = [self.pt_to_numpy(elem) for elem in image]
image = np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0)
else:
image = self.reshape(np.concatenate(image, axis=0) if image[0].ndim == 4 else np.stack(image, axis=0))
if do_convert_grayscale and image.ndim == 3:
image = np.expand_dims(image, 1)
# don't need any preprocess if the image is latents
if image.shape[1] == 4:
return image
if self.config.do_resize:
height, width = self.get_height_width(image, height, width)
image = self.resize(image, height, width)
# expected range [0,1], normalize to [-1,1]
do_normalize = self.config.do_normalize
if image.min() < 0 and do_normalize:
warnings.warn(
"Passing `image` as torch tensor with value range in [-1,1] is deprecated. The expected value range for image tensor is [0,1] "
f"when passing as pytorch tensor or numpy Array. You passed `image` with value range [{image.min()},{image.max()}]",
FutureWarning,
)
do_normalize = False
if do_normalize:
image = self.normalize(image)
if getattr(self.config, "do_binarize", False):
image = self.binarize(image)
return image
# Adapted from diffusers.VaeImageProcessor.postprocess
def postprocess(
self,
image: np.ndarray,
output_type: str = "pil",
do_denormalize: Optional[List[bool]] = None,
):
if not isinstance(image, np.ndarray):
raise ValueError(
f"Input for postprocessing is in incorrect format: {type(image)}. We only support np array"
)
if output_type not in ["latent", "np", "pil"]:
deprecation_message = (
f"the output_type {output_type} is outdated and has been set to `np`. Please make sure to set it to one of these instead: "
"`pil`, `np`, `pt`, `latent`"
)
warnings.warn(deprecation_message, FutureWarning)
output_type = "np"
if output_type == "latent":
return image
if do_denormalize is None:
do_denormalize = [self.config.do_normalize] * image.shape[0]
image = np.stack(
[self.denormalize(image[i]) if do_denormalize[i] else image[i] for i in range(image.shape[0])], axis=0
)
image = image.transpose((0, 2, 3, 1))
if output_type == "pil":
image = self.numpy_to_pil(image)
return image
def get_height_width(
self,
        image: Union[PIL.Image.Image, np.ndarray],
height: Optional[int] = None,
width: Optional[int] = None,
):
"""
        This function returns the height and width that are downscaled to the next integer multiple of
`vae_scale_factor`.
Args:
            image (`PIL.Image.Image`, `np.ndarray`):
                The image input, which can be a PIL image, NumPy array, or PyTorch tensor. If it is a NumPy array, it
                should have shape `[batch, height, width]` or `[batch, height, width, channel]`; if it is a PyTorch
                tensor, it should have shape `[batch, channel, height, width]`.
            height (`int`, *optional*, defaults to `None`):
                The height of the preprocessed image. If `None`, the height of the `image` input will be used.
            width (`int`, *optional*, defaults to `None`):
                The width of the preprocessed image. If `None`, the width of the `image` input will be used.
"""
height = height or (image.height if isinstance(image, PIL.Image.Image) else image.shape[-2])
width = width or (image.width if isinstance(image, PIL.Image.Image) else image.shape[-1])
# resize to integer multiple of vae_scale_factor
width, height = (x - x % self.config.vae_scale_factor for x in (width, height))
return height, width
# Adapted from diffusers.VaeImageProcessor.numpy_to_pt
@staticmethod
def numpy_to_pt(images: np.ndarray) -> torch.FloatTensor:
"""
Convert a NumPy image to a PyTorch tensor.
"""
if images.ndim == 3:
images = images[..., None]
images = torch.from_numpy(images)
return images
# Adapted from diffusers.VaeImageProcessor.pt_to_numpy
@staticmethod
def pt_to_numpy(images: torch.FloatTensor) -> np.ndarray:
"""
Convert a PyTorch tensor to a NumPy image.
"""
images = images.cpu().float().numpy()
return images
@staticmethod
def reshape(images: np.ndarray) -> np.ndarray:
"""
Reshape inputs to expected shape.
"""
if images.ndim == 3:
images = images[..., None]
return images.transpose(0, 3, 1, 2)
# TODO : remove after diffusers v0.21.0 release
def resize(
self,
        image: Union[PIL.Image.Image, np.ndarray, torch.Tensor],
height: Optional[int] = None,
width: Optional[int] = None,
    ) -> Union[PIL.Image.Image, np.ndarray, torch.Tensor]:
"""
Resize image.
"""
if isinstance(image, PIL.Image.Image):
image = image.resize((width, height), resample=PIL_INTERPOLATION[self.config.resample])
elif isinstance(image, torch.Tensor):
image = torch.nn.functional.interpolate(image, size=(height, width))
elif isinstance(image, np.ndarray):
image = self.numpy_to_pt(image)
image = torch.nn.functional.interpolate(image, size=(height, width))
image = self.pt_to_numpy(image)
return image
| optimum/optimum/pipelines/diffusers/pipeline_utils.py/0 | {
"file_path": "optimum/optimum/pipelines/diffusers/pipeline_utils.py",
"repo_id": "optimum",
"token_count": 5294
} | 330 |
{"from_local":true} | optimum/tests/assets/hub/config.json/0 | {
"file_path": "optimum/tests/assets/hub/config.json",
"repo_id": "optimum",
"token_count": 7
} | 331 |
# coding=utf-8
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import timeit
import pytest
import torch
import transformers
from parameterized import parameterized
from testing_utils import MODELS_DICT, BetterTransformersTestMixin
from transformers import AutoModel, AutoProcessor, AutoTokenizer
from optimum.bettertransformer import BetterTransformer
from optimum.pipelines import pipeline
from optimum.utils.testing_utils import grid_parameters, require_accelerate, require_torch_gpu
class BetterTransformersEncoderTest(BetterTransformersTestMixin):
r"""
Full testing suite of the `BetterTransformers` integration into Hugging Face
`transformers` ecosystem. Check the docstring of each test to understand the
purpose of each test. Basically we test:
    - if the conversion dictionary is consistent, i.e. if the converted model exists
      in the Hugging Face `transformers` library.
- if the converted model produces the same logits as the original model.
- if the converted model is faster than the original model.
"""
SUPPORTED_ARCH = [
"albert",
"bert",
"bert-generation",
"camembert",
"data2vec-text",
"distilbert",
"electra",
"ernie",
"layoutlm",
"markuplm",
"rembert",
"roberta",
"rocbert",
"roformer",
"splinter",
"tapas",
"xlm_roberta",
]
FULL_GRID = {
"model_type": SUPPORTED_ARCH,
"keep_original_model": [True, False],
}
def tearDown(self):
gc.collect()
def prepare_inputs_for_class(
self, model_id: str, model_type: str, no_padding: bool = False, batch_size: int = 2, **preprocessor_kwargs
):
# TODO: remove the need for tokenizer
if model_type == "markuplm":
preprocessor = AutoProcessor.from_pretrained(model_id)
else:
preprocessor = AutoTokenizer.from_pretrained(model_id)
if batch_size == 1:
texts = ["a dummy input yeah yeah!"]
elif no_padding:
texts = ["a dummy input yeah yeah!"] * batch_size
else:
texts = ["a dummy input yeah yeah!"] + ["and two"] * (batch_size - 1)
padding = preprocessor_kwargs.pop("padding", True)
if padding == "max_length":
max_length = 25
else:
max_length = None
inputs = preprocessor(
texts, return_tensors="pt", padding=padding, max_length=max_length, **preprocessor_kwargs
)
return inputs
def test_raise_pos_emb(self):
r"""
        Test if the conversion properly raises an error if the model uses a position embedding type that is
        not supported by `BetterTransformer`. For now, only the `Bert` family of models supports this test.
"""
random_config = getattr(transformers, "BertConfig")()
random_config.position_embedding_type = "relative"
with self.assertRaises(ValueError):
hf_model = AutoModel.from_config(random_config).eval()
_ = BetterTransformer.transform(hf_model, keep_original_model=False)
@torch.no_grad()
def test_inference_speed(self):
r"""
The converted models should be at least slightly faster than the native
model. This test aims to check this.
Let's test the inference speed on bert-base-uncased only. If it works for this
model, it should be applicable to all other models, see the test above.
"""
model_name = "bert-base-uncased"
hf_model = AutoModel.from_pretrained(model_name).eval()
bt_model = BetterTransformer.transform(hf_model, keep_original_model=True)
BATCH_SIZE = 8
SEQ_LEN = 16
MAX_SEQ_LEN = 32
STD_SEQ_LEN = 10 # let's take a large sequence length standard deviation
VOCAB_SIZE = 50
N_REPEAT = 10
input_ids, _, attention_mask = get_batch(BATCH_SIZE, SEQ_LEN, MAX_SEQ_LEN, STD_SEQ_LEN, VOCAB_SIZE)
for i in range(1, BATCH_SIZE):
attention_mask[i, SEQ_LEN // 4 :] = 0
mean_hf_time = 0
mean_bt_time = 0
# warmup hf_model
_ = hf_model(input_ids, attention_mask=attention_mask)
# warmup bt_model
_ = bt_model(input_ids, attention_mask=attention_mask)
for _ in range(N_REPEAT):
mean_hf_time += timeit.timeit(lambda: hf_model(input_ids, attention_mask=attention_mask), number=1)
mean_bt_time += timeit.timeit(lambda: bt_model(input_ids, attention_mask=attention_mask), number=1)
mean_hf_time /= N_REPEAT
mean_bt_time /= N_REPEAT
self.assertLess(mean_bt_time, mean_hf_time, "The converted model is slower than the original model.")
gc.collect()
def test_pipeline_on_cpu(self):
r"""
This test runs pipeline together with Better Transformers converted models using optimum `pipeline`.
"""
model_name = "distilbert-base-uncased"
unmasker = pipeline("fill-mask", model_name, accelerator="bettertransformer")
out = unmasker("Hello I'm a [MASK] model.")
self.assertEqual(out[0]["token_str"], "role")
gc.collect()
@require_torch_gpu
@pytest.mark.gpu_test
def test_pipeline_on_gpu(self):
r"""
This test runs pipeline together with Better Transformers converted models using optimum `pipeline`.
"""
model_name = "distilbert-base-uncased"
unmasker = pipeline("fill-mask", model_name, accelerator="bettertransformer", device="cuda:0")
out = unmasker("Hello I'm a [MASK] model.")
self.assertEqual(out[0]["token_str"], "role")
gc.collect()
@require_torch_gpu
@require_accelerate
def check_accelerate_compatibility_cpu_gpu(self, keep_original_model=True, max_memory=None):
r"""
This tests if a model loaded with `accelerate` will be successfully converted
into its BetterTransformers format.
If this works for roberta, it should work for all other models too.
"""
hf_model = AutoModel.from_pretrained("xlm-roberta-base", device_map="auto", max_memory=max_memory).eval()
bt_model = BetterTransformer.transform(
hf_model, keep_original_model=keep_original_model, max_memory=max_memory
)
inputs_ids = torch.LongTensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1]])
attention_mask = torch.Tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0]])
# Check that the model has been dispatched on CPU and GPU
self.assertSetEqual(set(hf_model.hf_device_map.values()), set(max_memory))
self.assertSetEqual(set(bt_model.hf_device_map.values()), set(max_memory))
# Check that the model has weights on GPU and CPU
self.assertEqual(bt_model.encoder.layer[0].in_proj_weight.device, torch.device("cuda:0"))
# Weights that are offloaded on the CPU are offloaded on the `meta` device
if "cpu" in set(max_memory):
self.assertEqual(bt_model.encoder.layer[-1].in_proj_weight.device, torch.device("meta"))
# Forward pass should work
output_bt = bt_model(inputs_ids, attention_mask)
output_hf = hf_model(inputs_ids, attention_mask)
# Assert that the output has been correctly set to the CPU!
self.assertEqual(output_bt[0].device, torch.device("cpu"))
# Final step: check the logits
self.assertTrue(torch.allclose(output_bt[0][0, :3], output_hf[0][0, :3], atol=1e-3))
# Check that the padding has been taken into account correctly - this checks also if the hooks
# have been correctly set.
self.assertTrue(torch.allclose(output_bt[0][1, 3:], torch.zeros_like(output_bt[0][1, 3:])))
gc.collect()
@pytest.mark.gpu_test
@pytest.mark.accelerate_test
def test_accelerate_compatibility_cpu_gpu(self):
r"""
Wrapper around the `check_accelerate_compatibility_cpu_gpu` test with `keep_original_model=True`
"""
max_memory = {0: "1GB", "cpu": "3GB"}
self.check_accelerate_compatibility_cpu_gpu(keep_original_model=True, max_memory=max_memory)
@pytest.mark.gpu_test
@pytest.mark.accelerate_test
def test_accelerate_compatibility_cpu_gpu_without_keeping(self):
r"""
Wrapper around the `check_accelerate_compatibility_cpu_gpu` test with `keep_original_model=False`
"""
max_memory = {0: "1GB", "cpu": "3GB"}
self.check_accelerate_compatibility_cpu_gpu(keep_original_model=False, max_memory=max_memory)
@pytest.mark.gpu_test
@pytest.mark.accelerate_test
def test_accelerate_compatibility_single_gpu(self):
r"""
Wrapper around the `check_accelerate_compatibility_cpu_gpu` test with `keep_original_model=False`
& `max_memory = {0: "2GB"}`
"""
max_memory = {0: "2GB"}
self.check_accelerate_compatibility_cpu_gpu(keep_original_model=True, max_memory=max_memory)
@pytest.mark.gpu_test
@pytest.mark.accelerate_test
def test_accelerate_compatibility_single_gpu_without_keeping(self):
r"""
Wrapper around the `check_accelerate_compatibility_cpu_gpu` test with `keep_original_model=True`
& `max_memory = {0: "2GB"}`
"""
max_memory = {0: "2GB"}
self.check_accelerate_compatibility_cpu_gpu(keep_original_model=False, max_memory=max_memory)
@parameterized.expand(
grid_parameters(
{
"model_type": SUPPORTED_ARCH,
"batch_size": [1, 3],
}
)
)
def test_logits(self, test_name: str, model_type: str, batch_size: int):
# TODO: enable those tests
if model_type in ["rocbert", "splinter", "markuplm", "bert-generation"]:
self.skipTest(f"tiny tokenizers are broken on the Hub {model_type}")
if model_type in ["tapas"]:
self.skipTest(f"{model_type} requires dataframe")
model_id = MODELS_DICT[model_type]
self._test_logits(model_id=model_id, model_type=model_type, batch_size=batch_size)
@parameterized.expand(
grid_parameters(
{
"model_type": SUPPORTED_ARCH,
"batch_size": [1, 3],
}
)
)
def test_logits_backward(self, test_name: str, model_type: str, batch_size: int):
# TODO: enable those tests
if model_type in ["rocbert", "splinter", "markuplm", "bert-generation"]:
self.skipTest(f"tiny tokenizer is broken on the Hub for {model_type}")
if model_type in ["tapas"]:
self.skipTest(f"{model_type} requires dataframe")
model_id = MODELS_DICT[model_type]
self._test_logits_backward(model_id=model_id, model_type=model_type, no_padding=True, batch_size=batch_size)
@parameterized.expand(grid_parameters(FULL_GRID))
def test_invert_modules(self, test_name: str, model_type: str, keep_original_model=False):
model_id = MODELS_DICT[model_type]
self._test_invert_modules(model_id=model_id, keep_original_model=keep_original_model)
@parameterized.expand(grid_parameters(FULL_GRID))
def test_save_load_invertible(self, test_name: str, model_type: str, keep_original_model=False):
model_id = MODELS_DICT[model_type]
self._test_save_load_invertible(model_id=model_id, keep_original_model=keep_original_model)
@parameterized.expand(grid_parameters(FULL_GRID))
def test_invert_model_logits(self, test_name: str, model_type: str, keep_original_model=False):
# TODO: reenable those tests
if model_type in ["rocbert", "splinter", "markuplm", "bert-generation"]:
self.skipTest(f"tiny tokenizers are broken on the Hub {model_type}")
if model_type in ["tapas"]:
self.skipTest(f"{model_type} requires dataframe")
model_id = MODELS_DICT[model_type]
self._test_invert_model_logits(
model_id=model_id, model_type=model_type, keep_original_model=keep_original_model
)
def get_batch(batch_size, avg_seqlen, max_sequence_length, seqlen_stdev, vocab_size, pad_idx=0):
r"""
Utility function to generate a batch of random sequences, together with their
attention mask and lengths.
Copied from: https://github.com/HamidShojanazeri/transformers/blob/ddf0299a13e7c4f54459a0731abd80204a1078f5/examples/pytorch/benchmarking/benchmark_bettertransformer.py#L149
"""
mean_tensor = torch.Tensor([avg_seqlen]).expand(batch_size)
stdev_tensor = torch.Tensor([seqlen_stdev]).expand(batch_size)
lengths = torch.normal(mean_tensor, stdev_tensor).to(torch.int)
# need at least a sequence length of 1 for BetterTransformer to work
lengths = torch.clamp(lengths, min=1, max=max_sequence_length)
tokens = torch.full(
(batch_size, max_sequence_length),
pad_idx,
)
for i in range(batch_size):
tokens[i, : lengths[i]] = torch.randint(
pad_idx + 1,
vocab_size - 1,
size=(lengths[i],),
)
mask = torch.full(
(batch_size, max_sequence_length),
0,
)
for i in range(batch_size):
mask[i, : lengths[i]] = 1
return tokens, lengths, mask
| optimum/tests/bettertransformer/test_encoder.py/0 | {
"file_path": "optimum/tests/bettertransformer/test_encoder.py",
"repo_id": "optimum",
"token_count": 5814
} | 332 |
# coding=utf-8
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
import unittest
from tempfile import TemporaryDirectory
from typing import TYPE_CHECKING, Dict, Optional
import pytest
from parameterized import parameterized
from transformers import AutoConfig, is_tf_available
from transformers.testing_utils import require_tf
from optimum.utils import DEFAULT_DUMMY_SHAPES
from ...utils.test_task_processors import TASK_TO_NON_DEFAULT_DATASET
from ..exporters_utils import PYTORCH_EXPORT_MODELS_TINY
if is_tf_available():
from optimum.exporters.tasks import TasksManager
if TYPE_CHECKING:
from optimum.exporters.tasks import ExportConfigConstructor
def _get_models_to_test(export_models_dict: Dict):
models_to_test = []
if is_tf_available():
for model_type, model_names_tasks in export_models_dict.items():
model_type = model_type.replace("_", "-")
try:
task_config_mapping = TasksManager.get_supported_tasks_for_model_type(model_type, "tflite")
except KeyError:
# In this case the model is either not supported, or the contributor forgot to register the
# TFLiteConfig in the TasksManager.
                # The TasksManager unit tests already check that no supported model type is left unregistered for a
                # backend, so we can simply skip this case here.
continue
if isinstance(model_names_tasks, str): # test export of all tasks on the same model
tasks = list(task_config_mapping.keys())
model_tasks = {model_names_tasks: tasks}
else:
unique_tasks = set()
for tasks in model_names_tasks.values():
for task in tasks:
unique_tasks.add(task)
n_tested_tasks = len(unique_tasks)
if n_tested_tasks != len(task_config_mapping):
raise ValueError(f"Not all tasks are tested for {model_type}.")
model_tasks = model_names_tasks # possibly, test different tasks on different models
for model_name, tasks in model_tasks.items():
for task in tasks:
default_shapes = dict(DEFAULT_DUMMY_SHAPES)
if task == "question-answering":
default_shapes["sequence_length"] = 384
tflite_config_constructor = TasksManager.get_exporter_config_constructor(
model_type=model_type,
exporter="tflite",
task=task,
model_name=model_name,
exporter_config_kwargs=default_shapes,
)
models_to_test.append((f"{model_type}_{task}", model_name, task, tflite_config_constructor))
return sorted(models_to_test)
else:
# Returning some dummy test that should not be ever called because of the @require_torch / @require_tf
# decorators.
# The reason for not returning an empty list is because parameterized.expand complains when it's empty.
return [("dummy", "dummy", "dummy")]
class TFLiteCLIExportTestCase(unittest.TestCase):
"""
Integration tests ensuring supported models are correctly exported.
"""
def _tflite_export(
self,
model_name: str,
tflite_config_constructor: "ExportConfigConstructor",
task: Optional[str] = None,
quantization: Optional[str] = None,
fallback_to_float: bool = False,
inputs_dtype: Optional[str] = None,
outputs_dtype: Optional[str] = None,
calibration_dataset_name_or_path: Optional[str] = None,
calibration_dataset_config_name: Optional[str] = None,
num_calibration_samples: int = 200,
calibration_split: Optional[str] = None,
primary_key: Optional[str] = None,
secondary_key: Optional[str] = None,
question_key: Optional[str] = None,
context_key: Optional[str] = None,
image_key: Optional[str] = None,
):
with TemporaryDirectory() as tmpdir:
command = f"python3 -m optimum.exporters.tflite --model {model_name}".split()
if task is not None:
command.append(f"--task={task}")
if quantization is not None:
command.append(f"--quantize={quantization}")
if fallback_to_float:
command.append("--fallback_to_float")
if inputs_dtype is not None:
command.append(f"--inputs_type={inputs_dtype}")
if outputs_dtype is not None:
command.append(f"--outputs_type={outputs_dtype}")
if calibration_dataset_name_or_path is not None:
command.append(f"--calibration_dataset={calibration_dataset_name_or_path}")
if calibration_dataset_config_name is not None:
command.append(f"--calibration_dataset_config_name={calibration_dataset_config_name}")
if calibration_split is not None:
command.append(f"--calibration_split={calibration_split}")
if primary_key is not None:
command.append(f"--primary_key={primary_key}")
if secondary_key is not None:
command.append(f"--secondary_key={secondary_key}")
if question_key is not None:
command.append(f"--question_key={question_key}")
if context_key is not None:
command.append(f"--context_key={context_key}")
if image_key is not None:
command.append(f"--image_key={image_key}")
command.append(f"--num_calibration_samples={num_calibration_samples}")
tflite_config = tflite_config_constructor(AutoConfig.from_pretrained(model_name))
mandatory_axes = tflite_config.get_mandatory_axes_for_task(task)
shapes = [f"--{name}={getattr(tflite_config, name)}" for name in mandatory_axes]
command += shapes
command.append(tmpdir)
proc = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
outs, errs = proc.communicate()
# Convenient to see the output with -s in PyTest.
print(outs.decode("utf-8"))
print(errs.decode("utf-8"))
not_supported = quantization is not None and (
not tflite_config.supports_quantization_approach(quantization) and not fallback_to_float
)
if not_supported:
self.assertIn("QuantizationApproachNotSupported", errs.decode("utf-8"))
else:
self.assertEqual(proc.returncode, 0)
@pytest.mark.skip("Not supported yet, need to have proper list of models to export to do it")
def test_all_models_tested(self):
pass
# TODO: enable later.
# make sure we test all models
# missing_models_set = TasksManager._SUPPORTED_CLI_MODEL_TYPE - set(PYTORCH_EXPORT_MODELS_TINY.keys())
# if len(missing_models_set) > 0:
# self.fail(f"Not testing all models. Missing models: {missing_models_set}")
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
def test_exporters_cli_tflite(
self, test_name: str, model_name: str, task: str, tflite_config_constructor: "ExportConfigConstructor"
):
self._tflite_export(model_name, tflite_config_constructor, task=task)
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_float16_quantization(
self, test_name: str, model_name: str, task: str, tflite_config_constructor: "ExportConfigConstructor"
):
self._tflite_export(model_name, tflite_config_constructor, task=task, quantization="fp16")
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_int8_dynamic_quantization(
self,
test_name: str,
model_name: str,
task: str,
tflite_config_constructor: "ExportConfigConstructor",
):
self._tflite_export(model_name, tflite_config_constructor, task=task, quantization="int8-dynamic")
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_full_int8_quantization_with_default_dataset(
self,
test_name: str,
model_name: str,
task: str,
tflite_config_constructor: "ExportConfigConstructor",
):
# TODO: currently only 4 tasks are supported.
if task not in TASK_TO_NON_DEFAULT_DATASET:
return
self._tflite_export(
model_name,
tflite_config_constructor,
task=task,
quantization="int8",
num_calibration_samples=3,
inputs_dtype="int8",
outputs_dtype="int8",
)
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_int8_quantization_with_default_dataset(
self,
test_name: str,
model_name: str,
task: str,
tflite_config_constructor: "ExportConfigConstructor",
):
# TODO: currently only 4 tasks are supported.
if task not in TASK_TO_NON_DEFAULT_DATASET:
return
self._tflite_export(
model_name, tflite_config_constructor, task=task, quantization="int8", num_calibration_samples=3
)
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_int8x16_quantization_with_default_dataset(
self,
test_name: str,
model_name: str,
task: str,
tflite_config_constructor: "ExportConfigConstructor",
):
# TODO: currently only 4 tasks are supported.
if task not in TASK_TO_NON_DEFAULT_DATASET:
return
self._tflite_export(
model_name, tflite_config_constructor, task=task, quantization="int8x16", num_calibration_samples=3
)
@parameterized.expand(_get_models_to_test(PYTORCH_EXPORT_MODELS_TINY))
@require_tf
@pytest.mark.quantization
def test_exporters_cli_tflite_int8_quantization_with_custom_dataset(
self,
test_name: str,
model_name: str,
task: str,
tflite_config_constructor: "ExportConfigConstructor",
):
# TODO: currently only 4 tasks are supported.
if task not in TASK_TO_NON_DEFAULT_DATASET:
return
custom_dataset = TASK_TO_NON_DEFAULT_DATASET[task]["dataset_args"]
config_name = None
if isinstance(custom_dataset, dict):
config_name = custom_dataset.get("name", None)
custom_dataset = custom_dataset["path"]
data_keys = TASK_TO_NON_DEFAULT_DATASET[task]["dataset_data_keys"]
kwargs = {f"{key_name}_key": value for key_name, value in data_keys.items()}
self._tflite_export(
model_name,
tflite_config_constructor,
task=task,
quantization="int8",
calibration_dataset_name_or_path=custom_dataset,
calibration_dataset_config_name=config_name,
num_calibration_samples=3,
**kwargs,
)
@pytest.mark.skip("Not supported yet since we only support the export for BERT")
def test_trust_remote_code(self):
with TemporaryDirectory() as tmpdirname:
out = subprocess.run(
f"python3 -m optimum.exporters.tflite --model fxmarty/tiny-testing-gpt2-remote-code --task text-generation {tmpdirname}",
shell=True,
capture_output=True,
)
self.assertTrue(out.returncode, 1)
self.assertTrue("requires you to execute the modeling file in that repo" in out.stderr.decode("utf-8"))
with TemporaryDirectory() as tmpdirname:
out = subprocess.run(
f"python3 -m optimum.exporters.tflite --trust-remote-code --model fxmarty/tiny-testing-gpt2-remote-code --task text-generation {tmpdirname}",
shell=True,
check=True,
)
| optimum/tests/exporters/tflite/test_exporters_tflite_cli.py/0 | {
"file_path": "optimum/tests/exporters/tflite/test_exporters_tflite_cli.py",
"repo_id": "optimum",
"token_count": 5945
} | 333 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import tempfile
import unittest
from functools import partial
from pathlib import Path
from onnx import load as onnx_load
from onnxruntime import __version__ as ort_version
from onnxruntime.quantization import QuantFormat, QuantizationMode, QuantType
from packaging.version import Version, parse
from parameterized import parameterized
from transformers import AutoTokenizer
from optimum.onnxruntime import (
AutoCalibrationConfig,
AutoQuantizationConfig,
ORTConfig,
ORTModelForCausalLM,
ORTModelForSeq2SeqLM,
ORTModelForSequenceClassification,
ORTQuantizer,
QuantizationConfig,
)
from optimum.utils.testing_utils import grid_parameters
class ORTQuantizerTest(unittest.TestCase):
LOAD_CONFIGURATION = {
"local_asset": {
"model_or_path": "assets/onnx",
},
"local_asset_different_name": {
"model_or_path": "assets/onnx",
"file_name": "different_name.onnx",
},
"ort_model_class": {
"model_or_path": ORTModelForSequenceClassification.from_pretrained(
"optimum/distilbert-base-uncased-finetuned-sst-2-english"
)
},
}
@parameterized.expand(LOAD_CONFIGURATION.items())
def test_from_pretrained_method(self, *args):
_, args = args
quantizer = ORTQuantizer.from_pretrained(**args)
self.assertIsInstance(quantizer, ORTQuantizer)
def test_fail_from_pretrained_method(self):
with self.assertRaises(Exception) as context:
ORTQuantizer.from_pretrained("bert-base-cased")
self.assertIn("Could not find any ONNX model file in bert-base-cased", str(context.exception))
with self.assertRaises(Exception) as context:
model = ORTModelForSeq2SeqLM.from_pretrained("optimum/t5-small")
ORTQuantizer.from_pretrained(model)
self.assertIn("ORTQuantizer does not support multi-file quantization.", str(context.exception))
class ORTDynamicQuantizationTest(unittest.TestCase):
SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS = (
(ORTModelForSequenceClassification, "hf-internal-testing/tiny-random-bert", 30),
(ORTModelForSequenceClassification, "hf-internal-testing/tiny-random-roberta", 30),
(ORTModelForSequenceClassification, "hf-internal-testing/tiny-random-distilbert", 30),
(ORTModelForSequenceClassification, "hf-internal-testing/tiny-random-bart", 32),
)
SUPPORTED_DECODER_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS = (
(ORTModelForCausalLM, "hf-internal-testing/tiny-random-gpt2", 22),
)
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS)
def test_dynamic_quantization(self, model_cls, model_name, expected_quantized_matmuls):
qconfig = QuantizationConfig(
is_static=False,
format=QuantFormat.QOperator,
mode=QuantizationMode.IntegerOps,
activations_dtype=QuantType.QUInt8,
weights_dtype=QuantType.QInt8,
per_channel=False,
reduce_range=False,
operators_to_quantize=["MatMul"],
)
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir = Path(tmp_dir)
model = model_cls.from_pretrained(model_name, export=True)
model.save_pretrained(tmp_dir)
quantizer = ORTQuantizer.from_pretrained(model)
quantizer.quantize(save_dir=output_dir, quantization_config=qconfig)
expected_ort_config = ORTConfig(quantization=qconfig)
ort_config = ORTConfig.from_pretrained(tmp_dir)
# Verify the ORTConfig was correctly created and saved
self.assertEqual(ort_config.to_dict(), expected_ort_config.to_dict())
quantized_model = onnx_load(output_dir.joinpath("model_quantized.onnx"))
num_quantized_matmul = 0
for initializer in quantized_model.graph.initializer:
if "MatMul" in initializer.name and "quantized" in initializer.name:
num_quantized_matmul += 1
self.assertEqual(expected_quantized_matmuls, num_quantized_matmul)
gc.collect()
# NOTE: Will be fixed in 1.17.1, reference: https://github.com/microsoft/onnxruntime/pull/19421
@unittest.skipIf(parse(ort_version) == Version("1.17.0"), "not supported with this onnxruntime version")
def test_dynamic_quantization_subgraphs(self):
qconfig = AutoQuantizationConfig.avx512(is_static=False, per_channel=True)
tmp_dir = tempfile.mkdtemp()
output_dir = Path(tmp_dir)
model = ORTModelForCausalLM.from_pretrained("fxmarty/onnx-tiny-random-gpt2-with-merge", use_merged=True)
self.assertTrue(model.use_merged)
model.save_pretrained(tmp_dir)
quantizer = ORTQuantizer.from_pretrained(model)
quantizer.quantize(save_dir=output_dir, quantization_config=qconfig)
expected_ort_config = ORTConfig(quantization=qconfig)
ort_config = ORTConfig.from_pretrained(tmp_dir)
# Verify the ORTConfig was correctly created and saved
self.assertEqual(ort_config.to_dict(), expected_ort_config.to_dict())
quantized_model = onnx_load(output_dir.joinpath("decoder_model_merged_quantized.onnx"))
num_quantized_matmul = 0
for initializer in quantized_model.graph.initializer:
if "weight" in initializer.name and "quantized" in initializer.name:
num_quantized_matmul += 1
self.assertTrue(num_quantized_matmul > 0)
gc.collect()
@parameterized.expand(
grid_parameters(
{"model_arch": SUPPORTED_DECODER_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS, "use_cache": [True, False]}
)
)
def test_decoder_quantization_with_and_without_cache(self, test_name, model_info, use_cache):
model_cls, model_name, expected_quantized_matmuls = model_info
qconfig = AutoQuantizationConfig.avx512(is_static=False, per_channel=True)
model = model_cls.from_pretrained(model_name, export=True, use_cache=use_cache, use_io_binding=use_cache)
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(tmp_dir)
output_dir = Path(tmp_dir)
quantizer = ORTQuantizer.from_pretrained(model)
quantizer.quantize(save_dir=output_dir, quantization_config=qconfig)
expected_ort_config = ORTConfig(quantization=qconfig)
ort_config = ORTConfig.from_pretrained(tmp_dir)
# Verify the ORTConfig was correctly created and saved
self.assertEqual(ort_config.to_dict(), expected_ort_config.to_dict())
quantized_model = onnx_load(output_dir.joinpath("model_quantized.onnx"))
num_quantized_matmul = 0
for initializer in quantized_model.graph.initializer:
if "weight" in initializer.name and "quantized" in initializer.name:
num_quantized_matmul += 1
self.assertEqual(expected_quantized_matmuls, num_quantized_matmul)
gc.collect()
class ORTStaticQuantizationTest(unittest.TestCase):
SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS = (
(ORTModelForSequenceClassification, "hf-internal-testing/tiny-random-bert", 30),
)
@parameterized.expand(SUPPORTED_ARCHITECTURES_WITH_EXPECTED_QUANTIZED_MATMULS)
def test_static_quantization(self, model_cls, model_name, expected_quantized_matmuls):
qconfig = QuantizationConfig(
is_static=True,
format=QuantFormat.QDQ,
mode=QuantizationMode.QLinearOps,
activations_dtype=QuantType.QInt8,
weights_dtype=QuantType.QInt8,
per_channel=False,
reduce_range=False,
operators_to_quantize=["MatMul"],
)
def preprocess_function(examples, tokenizer):
return tokenizer(examples["sentence"], padding="max_length", max_length=128, truncation=True)
with tempfile.TemporaryDirectory() as tmp_dir:
output_dir = Path(tmp_dir)
model = model_cls.from_pretrained(model_name, export=True)
model.save_pretrained(tmp_dir)
tokenizer = AutoTokenizer.from_pretrained(model_name)
quantizer = ORTQuantizer.from_pretrained(model)
calibration_dataset = quantizer.get_calibration_dataset(
"glue",
dataset_config_name="sst2",
preprocess_function=partial(preprocess_function, tokenizer=tokenizer),
num_samples=40,
dataset_split="train",
)
calibration_config = AutoCalibrationConfig.minmax(calibration_dataset)
ranges = quantizer.fit(dataset=calibration_dataset, calibration_config=calibration_config)
quantizer.quantize(
save_dir=output_dir,
calibration_tensors_range=ranges,
quantization_config=qconfig,
)
expected_ort_config = ORTConfig(quantization=qconfig)
ort_config = ORTConfig.from_pretrained(tmp_dir)
# Verify the ORTConfig was correctly created and saved
self.assertEqual(ort_config.to_dict(), expected_ort_config.to_dict())
quantized_model = onnx_load(output_dir.joinpath("model_quantized.onnx"))
num_quantized_matmul = 0
for initializer in quantized_model.graph.initializer:
if "MatMul" in initializer.name and "quantized" in initializer.name:
num_quantized_matmul += 1
self.assertEqual(expected_quantized_matmuls, num_quantized_matmul)
gc.collect()
| optimum/tests/onnxruntime/test_quantization.py/0 | {
"file_path": "optimum/tests/onnxruntime/test_quantization.py",
"repo_id": "optimum",
"token_count": 4483
} | 334 |
from parler_tts import ParlerTTSForConditionalGeneration
from transformers import AutoTokenizer, AutoFeatureExtractor
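# Push the DAC feature extractor, the T5 tokenizer, and a locally trained Parler-TTS checkpoint
# (local path left as TODO) to a single Hub repository.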
path = "TODO"
repo_id = "parler_tts_600M"
AutoFeatureExtractor.from_pretrained("ylacombe/dac_44khZ_8kbps").push_to_hub(repo_id)
AutoTokenizer.from_pretrained("google/t5-v1_1-base").push_to_hub(repo_id)
ParlerTTSForConditionalGeneration.from_pretrained(path).push_to_hub(repo_id)
| parler-tts/helpers/push_to_hub_scripts/push_trained_parler_tts_to_hub.py/0 | {
"file_path": "parler-tts/helpers/push_to_hub_scripts/push_trained_parler_tts_to_hub.py",
"repo_id": "parler-tts",
"token_count": 157
} | 335 |
# Minimal makefile for Sphinx documentation
#
# You can set these variables from the command line.
SPHINXOPTS =
SPHINXBUILD = sphinx-build
SOURCEDIR = source
BUILDDIR = _build
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
.PHONY: help Makefile
# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
%: Makefile
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) | peft/docs/Makefile/0 | {
"file_path": "peft/docs/Makefile",
"repo_id": "peft",
"token_count": 237
} | 336 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quantization
Quantization represents data with fewer bits, making it a useful technique for reducing memory usage and accelerating inference, especially when it comes to large language models (LLMs). There are several ways to quantize a model including:
* optimizing which model weights are quantized with the [AWQ](https://hf.co/papers/2306.00978) algorithm
* independently quantizing each row of a weight matrix with the [GPTQ](https://hf.co/papers/2210.17323) algorithm
* quantizing to 8-bit and 4-bit precision with the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library
* quantizing to as low as 2-bit precision with the [AQLM](https://arxiv.org/abs/2401.06118) algorithm
However, after a model is quantized it isn't typically further trained for downstream tasks because training can be unstable due to the lower precision of the weights and activations. But since PEFT methods only add *extra* trainable parameters, this allows you to train a quantized model with a PEFT adapter on top! Combining quantization with PEFT can be a good strategy for training even the largest models on a single GPU. For example, [QLoRA](https://hf.co/papers/2305.14314) is a method that quantizes a model to 4-bits and then trains it with LoRA. This method allows you to finetune a 65B parameter model on a single 48GB GPU!
In this guide, you'll see how to quantize a model to 4-bits and train it with LoRA.
## Quantize a model
[bitsandbytes](https://github.com/TimDettmers/bitsandbytes) is a quantization library with a Transformers integration. With this integration, you can quantize a model to 8 or 4-bits and enable many other options by configuring the [`~transformers.BitsAndBytesConfig`] class. For example, you can:
* set `load_in_4bit=True` to quantize the model to 4-bits when you load it
* set `bnb_4bit_quant_type="nf4"` to use a special 4-bit data type for weights initialized from a normal distribution
* set `bnb_4bit_use_double_quant=True` to use a nested quantization scheme to quantize the already quantized weights
* set `bnb_4bit_compute_dtype=torch.bfloat16` to use bfloat16 for faster computation
```py
import torch
from transformers import BitsAndBytesConfig
config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.bfloat16,
)
```
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
```
Next, you should call the [`~peft.utils.prepare_model_for_kbit_training`] function to preprocess the quantized model for training.
```py
from peft import prepare_model_for_kbit_training
model = prepare_model_for_kbit_training(model)
```
Now that the quantized model is ready, let's set up a configuration.
## LoraConfig
Create a [`LoraConfig`] with the following parameters (or choose your own):
```py
from peft import LoraConfig
config = LoraConfig(
r=16,
lora_alpha=8,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM"
)
```
Then use the [`get_peft_model`] function to create a [`PeftModel`] from the quantized model and configuration.
```py
from peft import get_peft_model
model = get_peft_model(model, config)
```
You're all set for training with whichever training method you prefer!
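For illustration, here is a rough sketch of what such a run could look like with the [`~transformers.Trainer`] class. The tokenizer, dataset, and hyperparameters below are placeholders for your own setup, not a prescribed recipe.

```py
from transformers import AutoTokenizer, Trainer, TrainingArguments, DataCollatorForLanguageModeling

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
tokenizer.pad_token = tokenizer.eos_token

# `train_dataset` stands in for a tokenized dataset you have already prepared
trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="qlora-mistral", per_device_train_batch_size=1, num_train_epochs=1),
    train_dataset=train_dataset,
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
trainer.train()
```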
### LoftQ initialization
[LoftQ](https://hf.co/papers/2310.08659) initializes LoRA weights such that the quantization error is minimized, and it can improve performance when training quantized models. To get started, follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/loftq_finetuning).
In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as the quant type in your quantization config when using 4-bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`.
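As a rough sketch of how this can be requested through [`LoraConfig`] (see the linked instructions for the complete, tested workflow): note that the base model is loaded without a quantization config here, because the LoftQ initialization quantizes the weights internally to compute the quantization error.

```py
from transformers import AutoModelForCausalLM
from peft import LoftQConfig, LoraConfig, get_peft_model

# full-precision base model; LoftQ handles the quantization internally
base_model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
loftq_config = LoftQConfig(loftq_bits=4)
lora_config = LoraConfig(
    init_lora_weights="loftq",
    loftq_config=loftq_config,
    target_modules="all-linear",
    task_type="CAUSAL_LM",
)
model = get_peft_model(base_model, lora_config)
```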
### QLoRA-style training
QLoRA adds trainable weights to all the linear layers in the transformer architecture. Since the attribute names for these linear layers can vary across architectures, set `target_modules` to `"all-linear"` to add LoRA to all the linear layers:
```py
config = LoraConfig(target_modules="all-linear", ...)
```
## AQLM quantization
Additive Quantization of Language Models ([AQLM](https://arxiv.org/abs/2401.06118)) is a compression method for large language models. It quantizes multiple weights together and takes advantage of interdependencies between them. AQLM represents groups of 8-16 weights as a sum of multiple vector codes. This allows it to compress models down to as low as 2-bit precision with considerably low accuracy loss.
Since the AQLM quantization process is computationally expensive, using prequantized models is recommended. A partial list of available models can be found in the official aqlm [repository](https://github.com/Vahe1994/AQLM).
These models support LoRA adapter tuning. To tune a quantized model, you'll need to install the `aqlm` inference library: `pip install aqlm>=1.0.2`. Finetuned LoRA adapters must be saved separately, as merging them with the AQLM-quantized weights is not possible.
```py
quantized_model = AutoModelForCausalLM.from_pretrained(
"BlackSamorez/Mixtral-8x7b-AQLM-2Bit-1x16-hf-test-dispatch",
torch_dtype="auto", device_map="auto", low_cpu_mem_usage=True,
)
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
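Because merging into the AQLM weights isn't possible, the trained adapter is saved and later re-attached on its own. Here is a minimal sketch, where `"aqlm-lora-adapter"` is just a placeholder output path:

```py
# after training, save only the LoRA adapter weights
quantized_model.save_pretrained("aqlm-lora-adapter")

# later, reload the prequantized base model and attach the adapter
from peft import PeftModel
base_model = AutoModelForCausalLM.from_pretrained(
    "BlackSamorez/Mixtral-8x7b-AQLM-2Bit-1x16-hf-test-dispatch",
    torch_dtype="auto", device_map="auto", low_cpu_mem_usage=True,
)
model = PeftModel.from_pretrained(base_model, "aqlm-lora-adapter")
```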
You can refer to the [Google Colab](https://colab.research.google.com/drive/12GTp1FCj5_0SnnNQH18h_2XFh9vS_guX?usp=sharing) example for an overview of AQLM+LoRA finetuning.
## EETQ quantization
You can also perform LoRA fine-tuning on EETQ quantized models. The [EETQ](https://github.com/NetEase-FuXi/EETQ) package offers a simple and efficient way to perform 8-bit quantization, which is claimed to be faster than the `LLM.int8()` algorithm. First, make sure you have a Transformers version that is compatible with EETQ (e.g. by installing it from the latest PyPI release or from source).
```py
import torch
from transformers import EetqConfig
config = EetqConfig("int8")
```
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
```
and create a `LoraConfig` and pass it to `get_peft_model`:
```py
from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=16,
lora_alpha=8,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM"
)
model = get_peft_model(model, config)
```
## HQQ quantization
Models quantized with Half-Quadratic Quantization of Large Machine Learning Models ([HQQ](https://mobiusml.github.io/hqq_blog/)) support LoRA adapter tuning. To tune a quantized model, you'll need to install the `hqq` library with: `pip install hqq`.
```py
from hqq.engine.hf import HQQModelForCausalLM
quantized_model = HQQModelForCausalLM.from_quantized(save_dir_or_hfhub, device='cuda')
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
Alternatively, use a Transformers version that is compatible with HQQ (e.g. by installing it from the latest PyPI release or from source).
```python
from transformers import HqqConfig, AutoModelForCausalLM
quant_config = HqqConfig(nbits=4, group_size=64)
quantized_model = AutoModelForCausalLM.from_pretrained(save_dir_or_hfhub, device='cuda', quantization_config=quant_config)
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
## Next steps
If you're interested in learning more about quantization, the following may be helpful:
* Learn more about details about QLoRA and check out some benchmarks on its impact in the [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) blog post.
* Read more about different quantization schemes in the Transformers [Quantization](https://hf.co/docs/transformers/main/quantization) guide.
| peft/docs/source/developer_guides/quantization.md/0 | {
"file_path": "peft/docs/source/developer_guides/quantization.md",
"repo_id": "peft",
"token_count": 2857
} | 337 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# PEFT configurations and models
The sheer size of today's large pretrained models - which commonly have billions of parameters - presents a significant training challenge because they require more storage space and more computational power to crunch all those calculations. You'll need access to powerful GPUs or TPUs to train these large pretrained models, which is expensive, not widely accessible to everyone, not environmentally friendly, and not very practical. PEFT methods address many of these challenges. There are several types of PEFT methods (soft prompting, matrix decomposition, adapters), but they all focus on the same thing: reducing the number of trainable parameters. This makes it more accessible to train and store large models on consumer hardware.
The PEFT library is designed to help you quickly train large models on free or low-cost GPUs, and in this tutorial, you'll learn how to set up a configuration to apply a PEFT method to a pretrained base model for training. Once the PEFT configuration is set up, you can use any training framework you like (Transformer's [`~transformers.Trainer`] class, [Accelerate](https://hf.co/docs/accelerate), a custom PyTorch training loop).
## PEFT configurations
<Tip>
Learn more about the parameters you can configure for each PEFT method in their respective API reference page.
</Tip>
A configuration stores important parameters that specify how a particular PEFT method should be applied.
For example, take a look at the following [`LoraConfig`](https://huggingface.co/ybelkada/opt-350m-lora/blob/main/adapter_config.json) for applying LoRA and [`PromptEncoderConfig`](https://huggingface.co/smangrul/roberta-large-peft-p-tuning/blob/main/adapter_config.json) for applying p-tuning (these configuration files are already JSON-serialized). Whenever you load a PEFT adapter, it is a good idea to check whether it has an associated adapter_config.json file, which is required.
<hfoptions id="config">
<hfoption id="LoraConfig">
```json
{
"base_model_name_or_path": "facebook/opt-350m", #base model to apply LoRA to
"bias": "none",
"fan_in_fan_out": false,
"inference_mode": true,
"init_lora_weights": true,
"layers_pattern": null,
"layers_to_transform": null,
"lora_alpha": 32,
"lora_dropout": 0.05,
"modules_to_save": null,
"peft_type": "LORA", #PEFT method type
"r": 16,
"revision": null,
"target_modules": [
"q_proj", #model modules to apply LoRA to (query and value projection layers)
"v_proj"
],
"task_type": "CAUSAL_LM" #type of task to train model on
}
```
You can create your own configuration for training by initializing a [`LoraConfig`].
```py
from peft import LoraConfig, TaskType
lora_config = LoraConfig(
r=16,
target_modules=["q_proj", "v_proj"],
task_type=TaskType.CAUSAL_LM,
lora_alpha=32,
lora_dropout=0.05
)
```
</hfoption>
<hfoption id="PromptEncoderConfig">
```json
{
"base_model_name_or_path": "roberta-large", #base model to apply p-tuning to
"encoder_dropout": 0.0,
"encoder_hidden_size": 128,
"encoder_num_layers": 2,
"encoder_reparameterization_type": "MLP",
"inference_mode": true,
"num_attention_heads": 16,
"num_layers": 24,
"num_transformer_submodules": 1,
"num_virtual_tokens": 20,
"peft_type": "P_TUNING", #PEFT method type
"task_type": "SEQ_CLS", #type of task to train model on
"token_dim": 1024
}
```
You can create your own configuration for training by initializing a [`PromptEncoderConfig`].
```py
from peft import PromptEncoderConfig, TaskType
p_tuning_config = PromptEncoderConfig(
    encoder_reparameterization_type="MLP",
encoder_hidden_size=128,
num_attention_heads=16,
num_layers=24,
num_transformer_submodules=1,
num_virtual_tokens=20,
token_dim=1024,
task_type=TaskType.SEQ_CLS
)
```
</hfoption>
</hfoptions>
## PEFT models
With a PEFT configuration in hand, you can now apply it to any pretrained model to create a [`PeftModel`]. Choose from any of the state-of-the-art models from the [Transformers](https://hf.co/docs/transformers) library, a custom model, and even new and unsupported transformer architectures.
For this tutorial, load a base [facebook/opt-350m](https://huggingface.co/facebook/opt-350m) model to finetune.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("facebook/opt-350m")
```
Use the [`get_peft_model`] function to create a [`PeftModel`] from the base facebook/opt-350m model and the `lora_config` you created earlier.
```py
from peft import get_peft_model
lora_model = get_peft_model(model, lora_config)
lora_model.print_trainable_parameters()
"trainable params: 1,572,864 || all params: 332,769,280 || trainable%: 0.472659014678278"
```
Now you can train the [`PeftModel`] with your preferred training framework! After training, you can save your model locally with [`~PeftModel.save_pretrained`] or upload it to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method.
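For illustration only, a bare-bones custom PyTorch loop might look like the sketch below; the dataloader, learning rate, and number of epochs are placeholders for your own setup. Once training is done, saving works as shown right after.

```py
import torch

# `train_dataloader` stands in for your own DataLoader of tokenized batches (with labels)
optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, lora_model.parameters()), lr=2e-4)
lora_model.train()
for batch in train_dataloader:
    outputs = lora_model(**batch)  # the loss is returned when labels are present in the batch
    outputs.loss.backward()
    optimizer.step()
    optimizer.zero_grad()
```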
```py
# save locally
lora_model.save_pretrained("your-name/opt-350m-lora")
# push to Hub
lora_model.push_to_hub("your-name/opt-350m-lora")
```
To load a [`PeftModel`] for inference, you'll need to provide the [`PeftConfig`] used to create it and the base model it was trained from.
```py
from peft import PeftModel, PeftConfig
config = PeftConfig.from_pretrained("ybelkada/opt-350m-lora")
model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path)
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora")
```
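From there, inference works like with any other causal language model. Here is a quick sketch (the prompt and generation settings are arbitrary):

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
inputs = tokenizer("Preheat the oven to 350 degrees and place the cookie dough", return_tensors="pt")
outputs = lora_model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```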
<Tip>
By default, the [`PeftModel`] is set for inference, but if you'd like to train the adapter some more you can set `is_trainable=True`.
```py
lora_model = PeftModel.from_pretrained(model, "ybelkada/opt-350m-lora", is_trainable=True)
```
</Tip>
The [`PeftModel.from_pretrained`] method is the most flexible way to load a [`PeftModel`] because it doesn't matter what model framework was used (Transformers, timm, a generic PyTorch model). Other classes, like [`AutoPeftModel`], are just convenient wrappers around the base [`PeftModel`], and make it easier to load PEFT models directly from the Hub or locally where the PEFT weights are stored.
```py
from peft import AutoPeftModelForCausalLM
lora_model = AutoPeftModelForCausalLM.from_pretrained("ybelkada/opt-350m-lora")
```
Take a look at the [AutoPeftModel](package_reference/auto_class) API reference to learn more about the [`AutoPeftModel`] classes.
## Next steps
With the appropriate [`PeftConfig`], you can apply it to any pretrained model to create a [`PeftModel`] and train large powerful models faster on freely available GPUs! To learn more about PEFT configurations and models, the following guide may be helpful:
* Learn how to configure a PEFT method for models that aren't from Transformers in the [Working with custom models](../developer_guides/custom_models) guide.
| peft/docs/source/tutorial/peft_model_config.md/0 | {
"file_path": "peft/docs/source/tutorial/peft_model_config.md",
"repo_id": "peft",
"token_count": 2415
} | 338 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple, Union
import torch
from diffusers.models import UNet2DConditionModel
from diffusers.utils import BaseOutput, logging
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
@dataclass
class UNet2DConditionOutput(BaseOutput):
"""
Args:
sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Hidden states conditioned on `encoder_hidden_states` input. Output of last layer of model.
"""
sample: torch.FloatTensor
class UNet2DConditionNewModel(UNet2DConditionModel):
def forward(
self,
sample: torch.FloatTensor,
timestep: Union[torch.Tensor, float, int],
encoder_hidden_states: torch.Tensor,
guided_hint: Optional[torch.Tensor] = None,
class_labels: Optional[torch.Tensor] = None,
timestep_cond: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
cross_attention_kwargs: Optional[Dict[str, Any]] = None,
added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
mid_block_additional_residual: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
return_dict: bool = True,
) -> Union[UNet2DConditionOutput, Tuple]:
r"""
Args:
sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
encoder_attention_mask (`torch.Tensor`):
(batch, sequence_length) cross-attention mask, applied to encoder_hidden_states. True = keep, False =
discard. Mask will be converted into a bias, which adds large negative values to attention scores
corresponding to "discard" tokens.
return_dict (`bool`, *optional*, defaults to `True`):
Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
cross_attention_kwargs (`dict`, *optional*):
A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
`self.processor` in
[diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
added_cond_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified includes additional conditions that can be used for additional time
embeddings or encoder hidden states projections. See the configurations `encoder_hid_dim_type` and
`addition_embed_type` for more information.
Returns:
[`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
[`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
returning a tuple, the first element is the sample tensor.
"""
        # By default, samples have to be at least a multiple of the overall upsampling factor.
# The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
# However, the upsampling interpolation output size can be forced to fit any upsampling size
# on the fly if necessary.
default_overall_up_factor = 2**self.num_upsamplers
# upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
forward_upsample_size = False
upsample_size = None
if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
logger.info("Forward upsample size to force interpolation output size.")
forward_upsample_size = True
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None:
encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 0. center input if necessary
if self.config.center_input_sample:
sample = 2 * sample - 1.0
# 1. time
timesteps = timestep
if not torch.is_tensor(timesteps):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
is_mps = sample.device.type == "mps"
if isinstance(timestep, float):
dtype = torch.float32 if is_mps else torch.float64
else:
dtype = torch.int32 if is_mps else torch.int64
timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
elif len(timesteps.shape) == 0:
timesteps = timesteps[None].to(sample.device)
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
timesteps = timesteps.expand(sample.shape[0])
t_emb = self.time_proj(timesteps)
# `Timesteps` does not contain any weights and will always return f32 tensors
# but time_embedding might actually be running in fp16. so we need to cast here.
# there might be better ways to encapsulate this.
t_emb = t_emb.to(dtype=sample.dtype)
emb = self.time_embedding(t_emb, timestep_cond)
if self.class_embedding is not None:
if class_labels is None:
raise ValueError("class_labels should be provided when num_class_embeds > 0")
if self.config.class_embed_type == "timestep":
class_labels = self.time_proj(class_labels)
# `Timesteps` does not contain any weights and will always return f32 tensors
# there might be better ways to encapsulate this.
class_labels = class_labels.to(dtype=sample.dtype)
class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
if self.config.class_embeddings_concat:
emb = torch.cat([emb, class_emb], dim=-1)
else:
emb = emb + class_emb
if self.config.addition_embed_type == "text":
aug_emb = self.add_embedding(encoder_hidden_states)
emb = emb + aug_emb
elif self.config.addition_embed_type == "text_image":
            # Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
)
image_embs = added_cond_kwargs.get("image_embeds")
text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
aug_emb = self.add_embedding(text_embs, image_embs)
emb = emb + aug_emb
if self.time_embed_act is not None:
emb = self.time_embed_act(emb)
if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
            # Kandinsky 2.1 - style
if "image_embeds" not in added_cond_kwargs:
raise ValueError(
f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
)
image_embeds = added_cond_kwargs.get("image_embeds")
encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
# 2. pre-process and insert conditioning (ControlNet)
# Note: the added "guided_hint" is the only difference between this implementation and the original UNet2DConditionModel
sample = self.conv_in(sample)
sample = guided_hint + sample if guided_hint is not None else sample
# 3. down
down_block_res_samples = (sample,)
for downsample_block in self.down_blocks:
if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
sample, res_samples = downsample_block(
hidden_states=sample,
temb=emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
new_down_block_res_samples = ()
for down_block_res_sample, down_block_additional_residual in zip(
down_block_res_samples, down_block_additional_residuals
):
down_block_res_sample = down_block_res_sample + down_block_additional_residual
new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
down_block_res_samples = new_down_block_res_samples
# 4. mid
if self.mid_block is not None:
sample = self.mid_block(
sample,
emb,
encoder_hidden_states=encoder_hidden_states,
attention_mask=attention_mask,
cross_attention_kwargs=cross_attention_kwargs,
encoder_attention_mask=encoder_attention_mask,
)
if mid_block_additional_residual is not None:
sample = sample + mid_block_additional_residual
# 5. up
for i, upsample_block in enumerate(self.up_blocks):
is_final_block = i == len(self.up_blocks) - 1
res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
# if we have not reached the final block and need to forward the
# upsample size, we do it here
if not is_final_block and forward_upsample_size:
upsample_size = down_block_res_samples[-1].shape[2:]
if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
sample = upsample_block(
hidden_states=sample,
temb=emb,
res_hidden_states_tuple=res_samples,
encoder_hidden_states=encoder_hidden_states,
cross_attention_kwargs=cross_attention_kwargs,
upsample_size=upsample_size,
attention_mask=attention_mask,
encoder_attention_mask=encoder_attention_mask,
)
else:
sample = upsample_block(
hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
)
# 6. post-process
if self.conv_norm_out:
sample = self.conv_norm_out(sample)
sample = self.conv_act(sample)
sample = self.conv_out(sample)
if not return_dict:
return (sample,)
return UNet2DConditionOutput(sample=sample)
| peft/examples/boft_controlnet/utils/unet_2d_condition.py/0 | {
"file_path": "peft/examples/boft_controlnet/utils/unet_2d_condition.py",
"repo_id": "peft",
"token_count": 5908
} | 339 |
<jupyter_start><jupyter_text>Training PEFT models with new tokens being added to the embedding layers and tokenizerIn this example, we will learn how to train a LoRA model when adding new tokens to the tokenizer and model. This is a common use case when doing the following:1. Instruction finetuning with new tokens being added such as ``, ``, ``, ``, `` to properly format the conversations2. Finetuning on a specific language wherein language-specific tokens are added, e.g., Korean tokens being added to the vocabulary for finetuning an LLM on Korean datasets.3. Instruction finetuning to return outputs in a certain format to enable agent behaviour with new tokens such as ``, ``, ``, ``, ``, ``, ``.In such cases, you add the Embedding modules to the LoRA `target_modules`. PEFT will take care of saving the embedding layers with the newly added tokens along with the adapter weights that were trained on the specific initialization of the embedding weights of the added tokens. Let's import the necessary libraries<jupyter_code>import os
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
os.environ["WANDB_PROJECT"] = "PeftExamples"
import transformers
from peft import (
LoraConfig,
PeftConfig,
PeftModel,
get_peft_model,
prepare_model_for_kbit_training,
)
from transformers import (
AutoModelForCausalLM,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
Trainer,
default_data_collator,
)
import torch
from dataclasses import dataclass, field
from typing import Optional
from dataclass_csv import DataclassReader
from torch.utils.data import Dataset, DataLoader
from enum import Enum<jupyter_output><empty_output><jupyter_text>Prepare Model and Tokenizer Now, we will be adding 27 new tokens as well as replacing the existing pad, bos and eos tokens of the model.<jupyter_code>class SpecialTokens(str, Enum):
begin_target = "<|begintarget|>"
end_target = "<|endtarget|>"
begin_context = "<|begincontext|>"
end_context = "<|endcontext|>"
system = "<|system|>"
user = "<|user|>"
begin_last_user_utterance = "<|beginlastuserutterance|>"
end_last_user_utterance = "<|endlastuserutterance|>"
begin_dsts = "<|begindsts|>"
end_dsts = "<|enddsts|>"
begin_dst = "<|begindst|>"
end_dst = "<|enddst|>"
begin_belief = "<|beginbelief|>"
end_belief = "<|endbelief|>"
begin_response = "<|beginresponse|>"
end_response = "<|endresponse|>"
begin_action = "<|beginaction|>"
end_action = "<|endaction|>"
begin_user_action = "<|beginuseraction|>"
end_user_action = "<|enduseraction|>"
sys_actions = "<|sysactions|>"
begin_intent = "<|beginintent|>"
end_intent = "<|endintent|>"
begin_requested_slots = "<|beginrequestedslots|>"
end_requested_slots = "<|endrequestedslots|>"
pad_token = "<|pad|>"
bos_token = "<|startoftext|>"
@classmethod
def list(cls):
        return [c.value for c in cls]<jupyter_output><empty_output><jupyter_text>We will be finetuning the Mistral-7B model. Let's load the tokenizer and add the special tokens, followed by loading the base model and resizing the embedding layers to accommodate the newly added tokens.<jupyter_code>model_name = "mistralai/Mistral-7B-v0.1"
tokenizer = AutoTokenizer.from_pretrained(
model_name,
pad_token=SpecialTokens.pad_token.value,
bos_token=SpecialTokens.bos_token.value,
eos_token=SpecialTokens.end_target.value,
additional_special_tokens=SpecialTokens.list(),
)
model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True
# use_flash_attention_2=True, # leading to an error
)
model.resize_token_embeddings(len(tokenizer))<jupyter_output><empty_output><jupyter_text>Apply LoRA<jupyter_code>config = LoraConfig(
r=64, lora_alpha=128, lora_dropout=0.0, target_modules=["embed_tokens", "lm_head", "q_proj", "v_proj"]
)
model = get_peft_model(model, config)
print(model.print_trainable_parameters())
print(model)<jupyter_output>trainable params: 31,886,720 || all params: 7,273,840,000 || trainable%: 0.43837532857472805
None
PeftModel(
(base_model): LoraModel(
(model): MistralForCausalLM(
(model): MistralModel(
(embed_tokens): lora.Embedding(
(base_layer): Embedding(32027, 4096)
(lora_dropout): ModuleDict(
(default): Identity()
)
(lora_A): ModuleDict()
(lora_B): ModuleDict()
(lora_embedding_A): ParameterDict( (default): Parameter containing: [torch.FloatTensor of size 64x32027])
(lora_embedding_B): ParameterDict( (default): Parameter containing: [torch.FloatTensor of size 4096x64])
)
(layers): ModuleList(
(0-31): 32 x MistralDecoderLayer(
(self_attn): MistralAttention(
(q_proj): lora.Linear(
(base_layer): Linear(in_features=4096, out_features=4096, bias=False)
(lora_dropout): ModuleDict(
                  (default): Identity([...]<jupyter_text>Prepare Dataset<jupyter_code>from datasets import load_dataset
dataset = load_dataset("smangrul/assistant_chatbot_dataset")
dataset = dataset["train"].train_test_split(0.2)
text_column = "context"
label_column = "target"
max_length = 512
def preprocess_function(examples):
batch_size = len(examples[text_column])
targets = [str(x) for x in examples[label_column]]
model_inputs = tokenizer(examples[text_column])
labels = tokenizer(targets, add_special_tokens=False) # don't add bos token because we concatenate with inputs
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i] + [tokenizer.eos_token_id]
# print(i, sample_input_ids, label_input_ids)
model_inputs["input_ids"][i] = sample_input_ids + label_input_ids
labels["input_ids"][i] = [-100] * len(sample_input_ids) + label_input_ids
model_inputs["attention_mask"][i] = [1] * len(model_inputs["input_ids"][i])
# print(model_inputs)
for i in range(batch_size):
sample_input_ids = model_inputs["input_ids"][i]
label_input_ids = labels["input_ids"][i]
model_inputs["input_ids"][i] = [tokenizer.pad_token_id] * (
max_length - len(sample_input_ids)
) + sample_input_ids
model_inputs["attention_mask"][i] = [0] * (max_length - len(sample_input_ids)) + model_inputs[
"attention_mask"
][i]
labels["input_ids"][i] = [-100] * (max_length - len(sample_input_ids)) + label_input_ids
model_inputs["input_ids"][i] = model_inputs["input_ids"][i][:max_length]
model_inputs["attention_mask"][i] = model_inputs["attention_mask"][i][:max_length]
labels["input_ids"][i] = labels["input_ids"][i][:max_length]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
processed_datasets = dataset.map(
preprocess_function,
batched=True,
num_proc=1,
remove_columns=dataset["train"].column_names,
load_from_cache_file=False,
desc="Running tokenizer on dataset",
)
train_dataset = processed_datasets["train"]
train_dataset
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=8, pin_memory=True
)
next(iter(train_dataloader))
tokenizer.decode(train_dataset[0]["input_ids"])<jupyter_output><empty_output><jupyter_text>Train the model<jupyter_code>training_args = TrainingArguments(
output_dir="mistral_lora_clm_with_added_tokens",
num_train_epochs=2,
save_total_limit=5,
per_device_train_batch_size=8,
warmup_steps=10,
weight_decay=0.0001,
dataloader_drop_last=True,
bf16=True,
logging_steps=10,
learning_rate=1e-5,
gradient_checkpointing=True,
gradient_checkpointing_kwargs={"use_reentrant": False},
remove_unused_columns=False,
hub_model_id="smangrul/mistral_lora_clm_with_added_tokens",
push_to_hub=True,
hub_private_repo=True,
)
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset,
data_collator=default_data_collator,
)
# model.config.use_cache = False
trainer.train()<jupyter_output>Detected kernel version 5.4.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher.
Failed to detect the name of this notebook, you can set it manually with the WANDB_NOTEBOOK_NAME environment variable to enable code saving.
[34m[1mwandb[0m: Currently logged in as: [33msmangrul[0m. Use [1m`wandb login --relogin`[0m to force relogin<jupyter_text>Check the model output on a sample from evaluation dataset<jupyter_code>import random
i = random.randint(0, len(dataset["test"]))
context = dataset["test"][i]["context"]
batch = tokenizer(context, return_tensors="pt")
batch = {k: v.to("cuda") for k, v in batch.items()}
model.eval()
output_tokens = model.generate(
**batch,
max_new_tokens=256,
do_sample=True,
temperature=0.2,
top_p=0.95,
top_k=50,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id,
)
target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1]
target = dataset["test"][i]["target"]
print(f"{context=} \n\n {target_predicted=} \n\n {target=}")<jupyter_output>context="<|begincontext|><|user|>Can you find me a place to eat please?<|system|>Where at? And what kind of cuisine are you craving?<|user|>Somewhere in SF, and I am really craving Thai food at the moment!<|system|>I found a bunch of restaurants, there's actually 10 that you might like in San Francisco, one of them being Baan Thai House & Wine Bar<|user|>How can I reach them? And what's their address?<|system|>You can reach them by phone at 415-379-4505 and visit them at 534 Irving Street<|beginlastuserutterance|>Great, that restaurant sounds good<|endlastuserutterance|><|endcontext|>"
target_predicted='<|begintarget|><|begindsts|><|begindst|><|beginintent|> FindRestaurants<|endintent|><|beginbelief|> Restaurants^city->SF~San Francisco|Restaurants^cuisine->Thai|Restaurants^restaurant_name->Baan Thai House & Wine Bar<|endbelief|><|enddst|><|enddsts|><|beginuseraction|> REQUEST->Restaurants^phone_number~|REQUEST->Restaurants^street_address~<|enduseraction|><|beginaction|> INFORM->Rest[...]<jupyter_text>Save the Adapter model When the lora layers are applied to embedding layers, the corresponding base model embedding layers are also saved.<jupyter_code>trainer.push_to_hub()
trainer.model.push_to_hub(training_args.output_dir)<jupyter_output>/raid/sourab/peft/src/peft/utils/save_and_load.py:128: UserWarning: Setting `is_embedding_layer_resized` to `True` as embedding layers found in `target_modules`
warnings.warn("Setting `is_embedding_layer_resized` to `True` as embedding layers found in `target_modules`")<jupyter_text>Check the model loading is working as expected and generating plausible outputs.<jupyter_code>from peft import PeftModel
inference_model = AutoModelForCausalLM.from_pretrained(
model_name,
low_cpu_mem_usage=True,
# use_flash_attention_2=True,
)
inference_model.resize_token_embeddings(len(tokenizer))
inference_model = PeftModel.from_pretrained(inference_model, "smangrul/mistral_lora_clm_with_added_tokens")
inference_model.to("cuda")
inference_model.eval()
output_tokens = inference_model.generate(
**batch,
max_new_tokens=256,
do_sample=True,
temperature=0.2,
top_p=0.95,
top_k=50,
eos_token_id=tokenizer.eos_token_id,
pad_token_id=tokenizer.pad_token_id,
)
target_predicted = tokenizer.decode(output_tokens[0], skip_special_tokens=False).split("<|endcontext|>")[1]
print(f"{context=} \n\n {target_predicted=} \n\n {target=}")<jupyter_output><empty_output> | peft/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb/0 | {
"file_path": "peft/examples/causal_language_modeling/peft_lora_clm_with_additional_tokens.ipynb",
"repo_id": "peft",
"token_count": 4571
} | 340 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import math
import os
import random
from pathlib import Path
import datasets
import evaluate
import torch
import transformers
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import DatasetDict, load_dataset
from huggingface_hub import HfApi
from torch import nn
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer, SchedulerType, default_data_collator, get_scheduler
from peft import LoraConfig, TaskType, get_peft_model
logger = get_logger(__name__)
def parse_args():
parser = argparse.ArgumentParser(description="Training a PEFT model for Semantic Search task")
parser.add_argument("--dataset_name", type=str, default=None, help="dataset name on HF hub")
parser.add_argument(
"--max_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after tokenization. Sequences longer than this will be truncated,"
" sequences shorter will be padded if `--pad_to_max_length` is passed."
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=True,
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="all",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--sanity_test",
action="store_true",
help="Whether to enable sanity test.",
)
parser.add_argument(
"--use_peft",
action="store_true",
help="Whether to use PEFT.",
)
args = parser.parse_args()
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
def save_model_hook(models, weights, output_dir):
for i, model in enumerate(models):
model.save_pretrained(output_dir, state_dict=weights[i])
# make sure to pop weight so that corresponding model is not saved again
weights.pop()
def load_model_hook(models, input_dir):
while len(models) > 0:
model = models.pop()
# pop models so that they are not loaded again
if hasattr(model, "active_adapter") and hasattr(model, "load_adapter"):
model.load_adapter(input_dir, model.active_adapter, is_trainable=True)
class AutoModelForSentenceEmbedding(nn.Module):
def __init__(self, model_name, tokenizer, normalize=True):
super().__init__()
self.model = AutoModel.from_pretrained(
model_name
        ) # , quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map={"":0})
self.normalize = normalize
self.tokenizer = tokenizer
def forward(self, **kwargs):
model_output = self.model(**kwargs)
embeddings = self.mean_pooling(model_output, kwargs["attention_mask"])
if self.normalize:
embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
return embeddings
def mean_pooling(self, model_output, attention_mask):
token_embeddings = model_output[0] # First element of model_output contains all token embeddings
input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)
def __getattr__(self, name: str):
"""Forward missing attributes to the wrapped module."""
try:
return super().__getattr__(name) # defer to nn.Module's logic
except AttributeError:
return getattr(self.model, name)
def get_cosing_embeddings(query_embs, product_embs):
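    # embeddings are L2-normalized in the model's forward pass, so this row-wise dot product equals cosine similarity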
return torch.sum(query_embs * product_embs, axis=1)
def get_loss(cosine_score, labels):
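    # contrastive-style objective: relevant pairs (label=1) are pulled toward a cosine score of 1,
    # while irrelevant pairs (label=0) are penalized only when their cosine score is positive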
return torch.mean(torch.square(labels * (1 - cosine_score) + torch.clamp((1 - labels) * cosine_score, min=0.0)))
def main():
args = parse_args()
accelerator_kwargs = {"gradient_accumulation_steps": args.gradient_accumulation_steps}
if args.with_tracking:
accelerator_kwargs["log_with"] = args.report_to
accelerator_kwargs["project_dir"] = args.output_dir
accelerator = Accelerator(**accelerator_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
api = HfApi(token=args.hub_token)
# Create repo (repo_name from args or inferred)
repo_name = args.hub_model_id
if repo_name is None:
repo_name = Path(args.output_dir).absolute().name
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# get the tokenizer
tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path)
# dataset download and preprocessing
if args.sanity_test:
train_dataset = load_dataset("smangrul/amazon_esci", split="train[:1024]")
val_dataset = load_dataset("smangrul/amazon_esci", split="validation[:1024]")
dataset = DatasetDict({"train": train_dataset, "validation": val_dataset})
else:
dataset = load_dataset(args.dataset_name)
def preprocess_function(examples):
queries = examples["query"]
result = tokenizer(queries, padding="max_length", max_length=70, truncation=True)
result = {f"query_{k}": v for k, v in result.items()}
products = examples["product_title"]
result_products = tokenizer(products, padding="max_length", max_length=70, truncation=True)
for k, v in result_products.items():
result[f"product_{k}"] = v
result["labels"] = examples["relevance_label"]
return result
processed_datasets = dataset.map(
preprocess_function,
batched=True,
remove_columns=dataset["train"].column_names,
desc="Running tokenizer on dataset",
)
# Log a few random samples from the training set:
for index in random.sample(range(len(processed_datasets["train"])), 3):
logger.info(f"Sample {index} of the training set: {processed_datasets['train'][index]}.")
# base model
model = AutoModelForSentenceEmbedding(args.model_name_or_path, tokenizer)
if args.use_peft:
# peft config and wrapping
peft_config = LoraConfig(
r=8,
lora_alpha=16,
bias="none",
task_type=TaskType.FEATURE_EXTRACTION,
target_modules=["key", "query", "value"],
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()
accelerator.print(model)
# get dataloaders
train_dataloader = DataLoader(
processed_datasets["train"],
shuffle=True,
collate_fn=default_data_collator,
batch_size=args.per_device_train_batch_size,
pin_memory=True,
)
eval_dataloader = DataLoader(
processed_datasets["validation"],
shuffle=False,
collate_fn=default_data_collator,
batch_size=args.per_device_eval_batch_size,
pin_memory=True,
)
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# We need to recalculate our total training steps as the size of the training dataloader may have changed
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
checkpointing_steps = args.checkpointing_steps
if checkpointing_steps is not None and checkpointing_steps.isdigit():
checkpointing_steps = int(checkpointing_steps)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("peft_semantic_search", experiment_config)
metric = evaluate.load("roc_auc")
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
if args.use_peft:
# saving and loading checkpoints for resuming training
accelerator.register_save_state_pre_hook(save_model_hook)
accelerator.register_load_state_pre_hook(load_model_hook)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(processed_datasets['train'])}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
accelerator.load_state(args.resume_from_checkpoint)
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
completed_steps = starting_epoch * num_update_steps_per_epoch
else:
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
completed_steps = resume_step // args.gradient_accumulation_steps
# update the progress_bar if load from checkpoint
progress_bar.update(completed_steps)
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We skip the first `n` batches in the dataloader when resuming from a checkpoint
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
else:
active_dataloader = train_dataloader
for step, batch in enumerate(active_dataloader):
with accelerator.accumulate(model):
query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k})
product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k})
loss = get_loss(get_cosing_embeddings(query_embs, product_embs), batch["labels"])
total_loss += accelerator.reduce(loss.detach().float(), reduction="sum")
accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
model.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if (step + 1) % 100 == 0:
logger.info(f"Step: {step+1}, Loss: {total_loss/(step+1)}")
if args.with_tracking:
accelerator.log({"train/loss": total_loss / (step + 1)}, step=completed_steps)
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps }"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
model.eval()
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
query_embs = model(**{k.replace("query_", ""): v for k, v in batch.items() if "query" in k})
product_embs = model(**{k.replace("product_", ""): v for k, v in batch.items() if "product" in k})
prediction_scores = get_cosing_embeddings(query_embs, product_embs)
prediction_scores, references = accelerator.gather_for_metrics((prediction_scores, batch["labels"]))
metric.add_batch(
prediction_scores=prediction_scores,
references=references,
)
result = metric.compute()
result = {f"eval/{k}": v for k, v in result.items()}
# Use accelerator.print to print only on the main process.
accelerator.print(f"epoch {epoch}:", result)
if args.with_tracking:
result["train/epoch_loss"] = total_loss.item() / len(train_dataloader)
accelerator.log(result, step=completed_steps)
if args.output_dir is not None:
accelerator.wait_for_everyone()
if accelerator.is_main_process:
if isinstance(checkpointing_steps, str):
accelerator.save_state(os.path.join(args.output_dir, f"epoch_{epoch}"))
accelerator.unwrap_model(model).save_pretrained(
args.output_dir, state_dict=accelerator.get_state_dict(accelerator.unwrap_model(model))
)
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
commit_message = (
f"Training in progress epoch {epoch}"
if epoch < args.num_train_epochs - 1
else "End of training"
)
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message=commit_message,
run_as_future=True,
)
accelerator.wait_for_everyone()
accelerator.end_training()
if __name__ == "__main__":
main()
| peft/examples/feature_extraction/peft_lora_embedding_semantic_search.py/0 | {
"file_path": "peft/examples/feature_extraction/peft_lora_embedding_semantic_search.py",
"repo_id": "peft",
"token_count": 8720
} | 341 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import copy
import logging
import math
import os
import random
import re
from pathlib import Path
import datasets
import torch
import transformers
from accelerate import Accelerator, DistributedType
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from datasets import load_dataset
from huggingface_hub import HfApi
from torch.utils.data import DataLoader
from tqdm.auto import tqdm
from transformers import (
CONFIG_MAPPING,
MODEL_MAPPING,
AutoConfig,
AutoModelForCausalLM,
AutoTokenizer,
BitsAndBytesConfig,
SchedulerType,
default_data_collator,
get_scheduler,
)
from transformers.utils import send_example_telemetry
from transformers.utils.versions import require_version
from peft import PeftModel
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
# check_min_version("4.32.0.dev0")
logger = get_logger(__name__)
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/language-modeling/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def parse_args():
parser = argparse.ArgumentParser(description="Finetune a transformers model on a causal language modeling task")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--train_file", type=str, default=None, help="A csv, txt or a json file containing the training data."
)
parser.add_argument(
"--validation_file", type=str, default=None, help="A csv, txt or a json file containing the validation data."
)
parser.add_argument(
"--validation_split_percentage",
default=5,
help="The percentage of the train set used as validation set in case there's no validation split",
)
parser.add_argument(
"--model_name_or_path",
type=str,
help="Path to pretrained model or model identifier from huggingface.co/models.",
required=False,
)
parser.add_argument(
"--config_name",
type=str,
default=None,
help="Pretrained config name or path if not the same as model_name",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
type=int,
default=8,
help="Batch size (per device) for the training dataloader.",
)
parser.add_argument(
"--per_device_eval_batch_size",
type=int,
default=8,
help="Batch size (per device) for the evaluation dataloader.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=5e-5,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.")
parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.")
parser.add_argument(
"--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
parser.add_argument(
"--lr_scheduler_type",
type=SchedulerType,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"],
)
parser.add_argument(
"--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.")
parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
parser.add_argument(
"--model_type",
type=str,
default=None,
help="Model type to use if training from scratch.",
choices=MODEL_TYPES,
)
parser.add_argument(
"--ignore_pad_token_for_loss",
type=bool,
default=True,
help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.",
)
parser.add_argument(
"--max_source_length",
type=int,
default=128,
help=(
"The maximum total input sequence length after "
"tokenization.Sequences longer than this will be truncated, sequences shorter will be padded."
),
)
parser.add_argument(
"--max_target_length",
type=int,
default=128,
help=(
"The maximum total sequence length for target text after "
"tokenization. Sequences longer than this will be truncated, sequences shorter will be padded."
"during ``evaluate`` and ``predict``."
),
)
parser.add_argument(
"--pad_to_max_length",
action="store_true",
help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.",
)
parser.add_argument(
"--preprocessing_num_workers",
type=int,
default=None,
help="The number of processes to use for the preprocessing.",
)
parser.add_argument(
"--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--no_keep_linebreaks", action="store_true", help="Do not keep line breaks when using TXT files."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`."
)
parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--trust_remote_code",
type=bool,
default=False,
help=(
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
),
)
parser.add_argument(
"--checkpointing_steps",
type=str,
default=None,
help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
)
parser.add_argument(
"--resume_from_checkpoint",
type=str,
default=None,
help="If the training should continue from a checkpoint folder.",
)
parser.add_argument(
"--with_tracking",
action="store_true",
help="Whether to enable experiment trackers for logging.",
)
parser.add_argument(
"--report_to",
type=str,
default="tensorboard",
help=(
'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
"Only applicable when `--with_tracking` is passed."
),
)
parser.add_argument(
"--low_cpu_mem_usage",
action="store_true",
help=(
"It is an option to create the model as an empty shell, then only materialize its parameters when the pretrained weights are loaded."
"If passed, LLM loading time and RAM consumption will be benefited."
),
)
##########################
# Generation Config #
##########################
parser.add_argument(
"--temperature",
type=float,
default=0.8,
help="temperature of 1.0 has no effect, lower tend toward greedy sampling",
)
parser.add_argument("--k", type=int, default=40, help="Choose k candidate words")
parser.add_argument("--p", type=float, default=0.95, help="The sum of probability of candidate words is 0.9 ")
##########################
# Exp Args #
##########################
parser.add_argument(
"--adapter_name_or_path",
type=str,
default=None,
help=(
"The LoRA adapter checkpoint. Set None if you want to fine-tune from LoftQ."
"Specify a path if you want to evaluate."
),
)
args = parser.parse_args()
# Sanity checks
if args.dataset_name is None and args.train_file is None and args.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if args.train_file is not None:
extension = args.train_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, json or txt file."
if args.validation_file is not None:
extension = args.validation_file.split(".")[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, json or txt file."
if args.push_to_hub:
assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed."
return args
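# Illustrative usage (all values below are placeholders rather than tuned
# recommendations; adjust them to your setup):
#   python train_gsm8k_llama.py \
#       --model_name_or_path <loftq-initialized-model-or-hub-id> \
#       --dataset_name gsm8k --dataset_config_name main \
#       --output_dir <output-dir> --with_tracking --report_to tensorboard
# Pass --adapter_name_or_path to evaluate or continue training an existing LoRA
# adapter instead of starting from the "loftq_init" subfolder of the base model.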
def main():
args = parse_args()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_clm_no_trainer", args)
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
# in the environment
accelerator_log_kwargs = {}
if args.with_tracking:
accelerator_log_kwargs["log_with"] = args.report_to
accelerator_log_kwargs["project_dir"] = args.output_dir
accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.info(accelerator.state, main_process_only=False)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Handle the repository creation
if accelerator.is_main_process:
if args.push_to_hub:
api = HfApi(token=args.hub_token)
# Create repo (repo_name from args or inferred)
repo_name = args.hub_model_id
if repo_name is None:
repo_name = Path(args.output_dir).absolute().name
repo_id = api.create_repo(repo_name, exist_ok=True).repo_id
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
accelerator.wait_for_everyone()
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[:{args.validation_split_percentage}%]",
)
raw_datasets["train"] = load_dataset(
args.dataset_name,
args.dataset_config_name,
split=f"train[{args.validation_split_percentage}%:]",
)
else:
data_files = {}
dataset_args = {}
if args.train_file is not None:
data_files["train"] = args.train_file
if args.validation_file is not None:
data_files["validation"] = args.validation_file
extension = args.train_file.split(".")[-1]
if extension == "txt":
extension = "text"
dataset_args["keep_linebreaks"] = not args.no_keep_linebreaks
raw_datasets = load_dataset(extension, data_files=data_files, **dataset_args)
# If no validation data is there, validation_split_percentage will be used to divide the dataset.
if "validation" not in raw_datasets.keys():
raw_datasets["validation"] = load_dataset(
extension,
data_files=data_files,
split=f"train[:{args.validation_split_percentage}%]",
**dataset_args,
)
raw_datasets["train"] = load_dataset(
extension,
data_files=data_files,
split=f"train[{args.validation_split_percentage}%:]",
**dataset_args,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if args.config_name:
config = AutoConfig.from_pretrained(
args.config_name,
trust_remote_code=args.trust_remote_code,
)
elif args.model_name_or_path:
config = AutoConfig.from_pretrained(
args.model_name_or_path,
trust_remote_code=args.trust_remote_code,
)
else:
config = CONFIG_MAPPING[args.model_type]()
logger.warning("You are instantiating a new config instance from scratch.")
if args.tokenizer_name:
tokenizer = AutoTokenizer.from_pretrained(
args.tokenizer_name, use_fast=not args.use_slow_tokenizer, trust_remote_code=args.trust_remote_code
)
elif args.model_name_or_path:
tokenizer = AutoTokenizer.from_pretrained(
args.model_name_or_path,
use_fast=not args.use_slow_tokenizer,
trust_remote_code=args.trust_remote_code,
)
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name."
)
##########################
# Tokenizer #
##########################
tokenizer.pad_token_id = 0 # unk. we want this to be different from the eos token
tokenizer.padding_side = "left" # Allow batched inference
tokenizer.truncation_side = "left"
if args.model_name_or_path:
model = AutoModelForCausalLM.from_pretrained(
args.model_name_or_path,
from_tf=bool(".ckpt" in args.model_name_or_path),
config=config,
low_cpu_mem_usage=True,
quantization_config=BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_use_double_quant=False,
bnb_4bit_quant_type="nf4",
bnb_4bit_compute_dtype=config.torch_dtype,
),
)
else:
logger.info("Training new model from scratch")
model = AutoModelForCausalLM.from_config(config, trust_remote_code=args.trust_remote_code)
##########################
# Peft Model #
##########################
if args.adapter_name_or_path is None:
model = PeftModel.from_pretrained(model, args.model_name_or_path, subfolder="loftq_init", is_trainable=True)
else:
model = PeftModel.from_pretrained(model, args.adapter_name_or_path, is_trainable=True)
model.print_trainable_parameters()
# We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch
# on a small vocab and want a smaller embedding size, remove this test.
embedding_size = model.get_input_embeddings().weight.shape[0]
if len(tokenizer) > embedding_size:
model.resize_token_embeddings(len(tokenizer))
# Preprocessing the datasets.
# First we tokenize all the texts.
##########################
# GSM8K dataset #
##########################
# Preprocessing the datasets.
# First we tokenize all the texts.
column_names = raw_datasets["train"].column_names
# Get the column names for source/target.
source_column, target_column = "question", "answer"
# Temporarily set max_target_length for training.
padding = "max_length" if args.pad_to_max_length else False
task_prompt = "\nAnswer the above question. First think step by step and then answer the final number.\n"
def prompt_process(sent_1, sent_2, prompt_1="", prompt_2="", prompt_3=""):
sent_2 = sent_2.replace("####", "The final answer is")
return prompt_1 + sent_1 + prompt_2 + sent_2 + prompt_3
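    # Illustrative example of the resulting training prompt (for a typical GSM8K
    # sample, with the task prompt passed as prompt_2 below):
    #   "<question>\nAnswer the above question. First think step by step and then
    #    answer the final number.\n<reasoning> The final answer is <number>"
    # because "####" in the reference answers is rewritten above.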
def preprocess_function_train(examples):
sources = examples[source_column]
targets = examples[target_column]
inputs = [prompt_process(source, target, prompt_2=task_prompt) for (source, target) in zip(sources, targets)]
model_inputs = tokenizer(
inputs,
max_length=args.max_source_length + args.max_target_length,
padding=padding,
truncation=True,
return_tensors="pt",
)
labels = copy.deepcopy(model_inputs)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and args.ignore_pad_token_for_loss:
# get the length of the target tokens. -1 to kick out the <BOS> token
target_tokens = tokenizer(targets, padding=False)
target_len = [len(label) - 1 for label in target_tokens["input_ids"]]
# don't calculate the loss from source and padding (left padding)
for i in range(len(labels["input_ids"])):
labels["input_ids"][i, : -target_len[i]] = -100
model_inputs["labels"] = labels["input_ids"]
return model_inputs
def preprocess_function_test(examples):
sources = examples[source_column]
labels = examples[target_column]
inputs = [source + task_prompt for source in sources]
model_inputs = tokenizer(inputs, max_length=args.max_source_length, padding=padding, truncation=True)
labels = tokenizer(labels, max_length=args.max_target_length, padding=padding, truncation=True)
model_inputs["labels"] = labels["input_ids"]
return model_inputs
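    # Note: at evaluation time only the question plus the task prompt is fed to the
    # model; the reference answers are tokenized separately as labels so they can be
    # decoded and compared against the generated answers later.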
with accelerator.main_process_first():
train_dataset = raw_datasets["train"].map(
preprocess_function_train,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on training dataset",
)
eval_dataset = raw_datasets["test"].map(
preprocess_function_test,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="Running tokenizer on test dataset",
)
# Log a few random samples from the set:
for index in random.sample(range(len(train_dataset)), 2):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
for index in random.sample(range(len(eval_dataset)), 2):
logger.info(f"Sample {index} of the validation set: {eval_dataset[index]}.")
# DataLoaders creation:
train_dataloader = DataLoader(
train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size
)
eval_dataloader = DataLoader(
eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size
)
# Optimizer
# Split weights in two groups, one with weight decay and the other not.
no_decay = ["bias", "layer_norm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay) and "lora" in n],
"weight_decay": args.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
},
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=args.learning_rate)
# Scheduler and math around the number of training steps.
overrode_max_train_steps = False
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
overrode_max_train_steps = True
lr_scheduler = get_scheduler(
name=args.lr_scheduler_type,
optimizer=optimizer,
num_warmup_steps=args.num_warmup_steps * args.gradient_accumulation_steps,
num_training_steps=args.max_train_steps * args.gradient_accumulation_steps,
)
# Prepare everything with our `accelerator`.
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
)
# On TPU, the tie weights in our model have been disconnected, so we need to restore the ties.
if accelerator.distributed_type == DistributedType.TPU:
model.tie_weights()
# We need to recalculate our total training steps as the size of the training dataloader may have changed.
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if overrode_max_train_steps:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
# Afterwards we recalculate our number of training epochs
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
# Figure out how many steps we should save the Accelerator states
checkpointing_steps = args.checkpointing_steps
if checkpointing_steps is not None and checkpointing_steps.isdigit():
checkpointing_steps = int(checkpointing_steps)
# We need to initialize the trackers we use, and also store our configuration.
# The trackers initializes automatically on the main process.
if args.with_tracking:
experiment_config = vars(args)
# TensorBoard cannot log Enums, need the raw value
experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value
accelerator.init_trackers("clm_no_trainer", experiment_config)
# Train!
total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
# Only show the progress bar once on each machine.
progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process)
completed_steps = 0
starting_epoch = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
checkpoint_path = args.resume_from_checkpoint
path = os.path.basename(args.resume_from_checkpoint)
else:
# Get the most recent checkpoint
dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
dirs.sort(key=os.path.getctime)
path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
checkpoint_path = path
path = os.path.basename(checkpoint_path)
accelerator.print(f"Resumed from checkpoint: {checkpoint_path}")
accelerator.load_state(path)
# Extract `epoch_{i}` or `step_{i}`
training_difference = os.path.splitext(path)[0]
if "epoch" in training_difference:
starting_epoch = int(training_difference.replace("epoch_", "")) + 1
resume_step = None
completed_steps = starting_epoch * num_update_steps_per_epoch
else:
# need to multiply `gradient_accumulation_steps` to reflect real steps
resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps
starting_epoch = resume_step // len(train_dataloader)
resume_step -= starting_epoch * len(train_dataloader)
completed_steps = resume_step // args.gradient_accumulation_steps
# update the progress_bar if load from checkpoint
progress_bar.update(completed_steps)
for epoch in range(starting_epoch, args.num_train_epochs):
model.train()
if args.with_tracking:
total_loss = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We skip the first `n` batches in the dataloader when resuming from a checkpoint
active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
else:
active_dataloader = train_dataloader
for step, batch in enumerate(active_dataloader):
with accelerator.accumulate(model):
outputs = model(**batch)
loss = outputs.loss
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(loss)
                if completed_steps % 50 == 0:
accelerator.print(f"Epoch: {epoch} | Step: {completed_steps} | Loss: {loss}")
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# Checks if the accelerator has performed an optimization step behind the scenes
if accelerator.sync_gradients:
progress_bar.update(1)
completed_steps += 1
if isinstance(checkpointing_steps, int):
if completed_steps % checkpointing_steps == 0:
output_dir = f"step_{completed_steps}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if completed_steps >= args.max_train_steps:
break
model.eval()
gen_kwargs = {
"max_new_tokens": args.max_target_length,
"temperature": args.temperature,
"top_k": args.k,
"top_p": args.p,
"do_sample": True,
}
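        # Sampling-based generation with the temperature/top-k/top-p CLI options.
        # Prompts are left-padded (and, with --pad_to_max_length, padded to
        # max_source_length), so tokens past that offset are treated below as the
        # model's generated answer.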
ans_pred_list = []
ans_gold_list = []
for step, batch in enumerate(eval_dataloader):
with torch.no_grad():
gen_kwargs["input_ids"] = batch["input_ids"]
gen_kwargs["attention_mask"] = batch["attention_mask"]
generated_tokens = accelerator.unwrap_model(model).generate(**gen_kwargs)
pred_tokens = generated_tokens[:, args.max_source_length :]
pred_tokens = accelerator.pad_across_processes(pred_tokens, dim=1, pad_index=tokenizer.pad_token_id)
gold_tokens = batch["labels"]
if not args.pad_to_max_length:
# If we did not pad to max length, we need to pad the labels too
gold_tokens = accelerator.pad_across_processes(
batch["labels"], dim=1, pad_index=tokenizer.pad_token_id
)
pred_tokens, gold_tokens = accelerator.gather_for_metrics((pred_tokens, gold_tokens))
pred_tokens, gold_tokens = pred_tokens.cpu().numpy(), gold_tokens.cpu().numpy()
if isinstance(pred_tokens, tuple):
pred_tokens = pred_tokens[0]
decoded_pred = tokenizer.batch_decode(pred_tokens, skip_special_tokens=True)
decoded_gold = tokenizer.batch_decode(gold_tokens, skip_special_tokens=True)
# Extract the numbers in sentences
accelerator.print(decoded_pred)
ans_pred_list += [extract_answer_number(sentence_pred) for sentence_pred in decoded_pred]
ans_gold_list += [extract_answer_number(sentence_gold) for sentence_gold in decoded_gold]
accelerator.print(ans_pred_list)
accelerator.print(ans_gold_list)
accuracy = compute_accuracy(ans_gold_list, ans_pred_list)
logger.info(f"epoch {epoch}: accuracy: {accuracy}")
if args.with_tracking:
accelerator.log(
{
"accuracy": accuracy,
"train_loss": total_loss.item() / len(train_dataloader),
"epoch": epoch,
"step": completed_steps,
},
step=completed_steps,
)
if args.push_to_hub and epoch < args.num_train_epochs - 1:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message=f"Training in progress epoch {epoch}",
run_as_future=True,
)
if args.checkpointing_steps == "epoch":
output_dir = f"epoch_{epoch}"
if args.output_dir is not None:
output_dir = os.path.join(args.output_dir, output_dir)
accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
if args.output_dir is not None:
accelerator.wait_for_everyone()
unwrapped_model = accelerator.unwrap_model(model)
unwrapped_model.save_pretrained(
args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save
)
if accelerator.is_main_process:
tokenizer.save_pretrained(args.output_dir)
if args.push_to_hub:
api.upload_folder(
repo_id=repo_id,
folder_path=args.output_dir,
commit_message="End of training",
)
PATTERN_NUMBER = re.compile(r"-?\d+\.?\d*")
def extract_answer_number(sentence: str) -> float:
sentence = sentence.replace(",", "")
pred = PATTERN_NUMBER.findall(sentence)
if not pred:
return float("inf")
segment = sentence.split("The final answer is ")
if len(segment) > 1:
pred_answer = segment[1]
pred_answer = PATTERN_NUMBER.findall(pred_answer)
if len(pred_answer) > 0:
pred_answer = pred_answer[0]
else:
pred_answer = float(pred[-1])
else:
pred_answer = float(pred[-1])
if isinstance(pred_answer, str):
try:
pred_answer = float(pred_answer)
except ValueError:
pred_answer = float("inf")
return pred_answer
def compute_accuracy(pred: list, gold: list):
acc = 0.0
for p, g in zip(pred, gold):
if p == g:
acc += 1
return acc / len(pred)
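# Illustrative sanity check (not part of the training flow):
#   extract_answer_number("... The final answer is 42") -> 42.0
#   extract_answer_number("no digits at all")           -> inf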
if __name__ == "__main__":
main()
| peft/examples/loftq_finetuning/train_gsm8k_llama.py/0 | {
"file_path": "peft/examples/loftq_finetuning/train_gsm8k_llama.py",
"repo_id": "peft",
"token_count": 14677
} | 342 |
<jupyter_start><jupyter_text>IntroductionIn this notebook, we will learn how to use [LoRA](https://arxiv.org/abs/2106.09685) from 🤗 PEFT to fine-tune a SegFormer model variant for semantic segmentation by ONLY using **14%** of the original trainable parameters of the model. LoRA adds low-rank "update matrices" to certain blocks in the underlying model (in this case the attention blocks) and ONLY trains those matrices during fine-tuning. During inference, these update matrices are _merged_ with the original model parameters. For more details, check out the [original LoRA paper](https://arxiv.org/abs/2106.09685). Let's get started by installing the dependencies. Install dependenciesHere we're installing `peft` from source to ensure we have access to all the bleeding edge features of `peft`.<jupyter_code>!pip install transformers accelerate evaluate datasets git+https://github.com/huggingface/peft -q<jupyter_output><empty_output><jupyter_text>AuthenticationWe will share our fine-tuned model at the end of training. So, to do that we just authenticate using our 🤗 token. This token is available from [here](https://huggingface.co/settings/tokens). If you don't have a 🤗 account already, we highly encourage you to do so; it's free!<jupyter_code>from huggingface_hub import notebook_login
notebook_login()<jupyter_output><empty_output><jupyter_text>Load a datasetWe're only loading the first 150 instances from the training set of the [SceneParse150 dataset](https://huggingface.co/datasets/scene_parse_150) to keep this example runtime short.<jupyter_code>from datasets import load_dataset
ds = load_dataset("scene_parse_150", split="train[:150]")<jupyter_output><empty_output><jupyter_text>Prepare train and test splits<jupyter_code>ds = ds.train_test_split(test_size=0.1)
train_ds = ds["train"]
test_ds = ds["test"]<jupyter_output><empty_output><jupyter_text>Prepare label mappersWe create two dictionaries:* `label2id`: maps the semantic classes of the dataset to integer ids.* `id2label`: `label2id` reversed.<jupyter_code>import json
from huggingface_hub import cached_download, hf_hub_url
repo_id = "huggingface/label-files"
filename = "ade20k-id2label.json"
id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
id2label = {int(k): v for k, v in id2label.items()}
label2id = {v: k for k, v in id2label.items()}
num_labels = len(id2label)<jupyter_output><empty_output><jupyter_text>Prepare datasets for training and evaluation<jupyter_code>from transformers import AutoImageProcessor
checkpoint = "nvidia/mit-b0"
image_processor = AutoImageProcessor.from_pretrained(checkpoint, do_reduce_labels=True)
from torchvision.transforms import ColorJitter
jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
from PIL import Image
import numpy as np
def handle_grayscale_image(image):
np_image = np.array(image)
if np_image.ndim == 2:
tiled_image = np.tile(np.expand_dims(np_image, -1), 3)
return Image.fromarray(tiled_image)
else:
return Image.fromarray(np_image)
def train_transforms(example_batch):
images = [jitter(handle_grayscale_image(x)) for x in example_batch["image"]]
labels = [x for x in example_batch["annotation"]]
inputs = image_processor(images, labels)
return inputs
def val_transforms(example_batch):
images = [handle_grayscale_image(x) for x in example_batch["image"]]
labels = [x for x in example_batch["annotation"]]
inputs = image_processor(images, labels)
return inputs
train_ds.set_transform(train_transforms)
test_ds.set_transform(val_transforms)<jupyter_output><empty_output><jupyter_text>Evaluation functionIncluding a metric during training is often helpful for evaluating your model’s performance. You can quickly load an evaluation method with the [🤗 Evaluate](https://huggingface.co/docs/evaluate/index) library. For this task, load the [mean Intersection over Union (IoU)](https://huggingface.co/spaces/evaluate-metric/accuracy) metric (see the 🤗 Evaluate [quick tour](https://huggingface.co/docs/evaluate/a_quick_tour) to learn more about how to load and compute a metric):<jupyter_code>import torch
from torch import nn
import evaluate
metric = evaluate.load("mean_iou")
def compute_metrics(eval_pred):
with torch.no_grad():
logits, labels = eval_pred
logits_tensor = torch.from_numpy(logits)
# scale the logits to the size of the label
logits_tensor = nn.functional.interpolate(
logits_tensor,
size=labels.shape[-2:],
mode="bilinear",
align_corners=False,
).argmax(dim=1)
pred_labels = logits_tensor.detach().cpu().numpy()
# currently using _compute instead of compute
# see this issue for more info: https://github.com/huggingface/evaluate/pull/328#issuecomment-1286866576
metrics = metric._compute(
predictions=pred_labels,
references=labels,
num_labels=len(id2label),
ignore_index=0,
reduce_labels=image_processor.do_reduce_labels,
)
# add per category metrics as individual key-value pairs
per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
per_category_iou = metrics.pop("per_category_iou").tolist()
metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})
return metrics<jupyter_output><empty_output><jupyter_text>Load a base modelFor this example, we use the [SegFormer B0 variant](https://huggingface.co/nvidia/mit-b0).<jupyter_code>def print_trainable_parameters(model):
"""
Prints the number of trainable parameters in the model.
"""
trainable_params = 0
all_param = 0
for _, param in model.named_parameters():
all_param += param.numel()
if param.requires_grad:
trainable_params += param.numel()
print(
f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param:.2f}"
    )<jupyter_output><empty_output><jupyter_text>We pass the `label2id` and `id2label` dictionaries to let the `AutoModelForSemanticSegmentation` class know that we're interested in a custom base model where the decoder head should be randomly initialized w.r.t. our custom dataset. Note, however, that the rest of the model parameters are pre-trained and will be fine-tuned in a regular transfer learning setup.We also notice that 100% of the parameters in the `model` are trainable.<jupyter_code>from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer
model = AutoModelForSemanticSegmentation.from_pretrained(
checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
print_trainable_parameters(model)<jupyter_output><empty_output><jupyter_text>Wrap `model` as a `PeftModel` for LoRA trainingThis involves two steps:* Defining a config with `LoraConfig`* Wrapping the original `model` with `get_peft_model()` with the config defined in the step above.<jupyter_code>from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=32,
lora_alpha=32,
target_modules=["query", "value"],
lora_dropout=0.1,
bias="lora_only",
modules_to_save=["decode_head"],
)
lora_model = get_peft_model(model, config)
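# A quick sanity note: with r=32 and lora_alpha=32 the LoRA update is applied with a
# scaling factor of lora_alpha / r = 1.0, i.e. the low-rank update B @ A is added at
# full strength to the frozen weights.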
print_trainable_parameters(lora_model)<jupyter_output>===================================BUG REPORT===================================
Welcome to bitsandbytes. For bug reports, please submit your error trace to: https://github.com/TimDettmers/bitsandbytes/issues
================================================================================
trainable params: 564374 || all params: 3883766 || trainable%: 14.53<jupyter_text>Let's unpack what's going on here. In order for LoRA to take effect, we need to specify the target modules to `LoraConfig` so that `PeftModel` knows which modules inside our model need to be amended with LoRA matrices. In this case, we're only interested in targeting the query and value matrices of the attention blocks of the base model. Since the parameters corresponding to these matrices are "named" with `query` and `value` respectively, we specify them accordingly in the `target_modules` argument of `LoraConfig`. We also specify `modules_to_save`. After we wrap our base model `model` with `PeftModel` along with the `config`, we get a new model where only the LoRA parameters are trainable (so-called "update matrices") while the pre-trained parameters are kept frozen. These include the parameters of the randomly initialized classifier too. This is NOT what we want when fine-tuning the base model on our custom dataset. To ensure that the classifier parameters are also trained, we specify `modules_to_save`. This also ensures that these modules are serialized alongside the LoRA trainable parameters when using utilities like `save_pretrained()` and `push_to_hub()`. Regarding the other parameters:* `r`: The dimension used by the LoRA update matrices.* `alpha`: Scaling factor.* `bias`: Specifying if the `bias` parameters should be trained. `lora_only` denotes that only the LoRA `bias` parameters will be trained. `r` and `alpha` together control the total number of final trainable parameters when using LoRA, giving us the flexibility to balance a trade-off between end performance and compute efficiency. We can also see how many parameters we're actually training. Since we're interested in performing **parameter-efficient fine-tuning**, we should expect to see fewer trainable parameters in the `lora_model` in comparison to the original `model`, which is indeed the case here. For sanity, let's also manually verify the modules that are actually trainable in `lora_model`.<jupyter_code>for name, param in lora_model.named_parameters():
if param.requires_grad:
print(name, param.shape)<jupyter_output>base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.0.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.query.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_A.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.0.1.attention.self.value.lora_B.weight torch.Size([32, 32])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_A.weight torch.Size([32, 64])
base_model.model.segformer.encoder.block.1.0.attention.self.query.lora_B.weight torch.Size([...]<jupyter_text>We can confirm that only the LoRA parameters appended to the attention blocks and the `decode_head` parameters are trainable. Train!This is a three-step process: 1. Define your training hyperparameters in [TrainingArguments](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainertransformers.TrainingArguments). It is important you don’t remove unused columns because this’ll drop the image column. Without the image column, you can’t create `pixel_values`. Set `remove_unused_columns=False` to prevent this behavior! The only other required parameter is output_dir which specifies where to save your model. At the end of each epoch, the `Trainer` will evaluate the IoU metric and save the training checkpoint.2. Pass the training arguments to [Trainer](https://huggingface.co/docs/transformers/v4.26.0/en/main_classes/trainertransformers.Trainer) along with the model, dataset, tokenizer, data collator, and `compute_metrics` function.3. Call `train()` to finetune your model.**Note** that this example is meant to walk you through the workflow when using PEFT for semantic segmentation. We didn't perform extensive hyperparameter tuning to achieve optimal results.<jupyter_code>model_name = checkpoint.split("/")[-1]
training_args = TrainingArguments(
output_dir=f"{model_name}-scene-parse-150-lora",
learning_rate=5e-4,
num_train_epochs=50,
per_device_train_batch_size=4,
per_device_eval_batch_size=2,
save_total_limit=3,
evaluation_strategy="epoch",
save_strategy="epoch",
logging_steps=5,
remove_unused_columns=False,
push_to_hub=True,
label_names=["labels"],
)
trainer = Trainer(
model=lora_model,
args=training_args,
train_dataset=train_ds,
eval_dataset=test_ds,
compute_metrics=compute_metrics,
)
trainer.train()<jupyter_output><empty_output><jupyter_text>Saving the model and inference Here we use the `save_pretrained()` method of the `lora_model` to save the *LoRA-only parameters* locally. However, you can also use the `push_to_hub()` method to upload these parameters directly to the Hugging Face Hub (as shown [here](https://colab.research.google.com/github/huggingface/peft/blob/main/examples/image_classification/image_classification_peft_lora.ipynb)).<jupyter_code>model_id = "segformer-scene-parse-150-lora"
lora_model.save_pretrained(model_id)<jupyter_output><empty_output><jupyter_text>We can see that the LoRA-only parameters are just **2.2 MB in size**! This greatly improves the portability when using very large models.<jupyter_code>!ls -lh {model_id}<jupyter_output>total 2.2M
-rw-r--r-- 1 root root 369 Feb 8 03:09 adapter_config.json
-rw-r--r-- 1 root root 2.2M Feb 8 03:09 adapter_model.bin<jupyter_text>Let's now prepare our `inference_model` and run an inference.<jupyter_code>from peft import PeftConfig
config = PeftConfig.from_pretrained(model_id)
model = AutoModelForSemanticSegmentation.from_pretrained(
checkpoint, id2label=id2label, label2id=label2id, ignore_mismatched_sizes=True
)
# Load the Lora model
inference_model = PeftModel.from_pretrained(model, model_id)<jupyter_output><empty_output><jupyter_text>Fetch an image.<jupyter_code>import requests
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png"
image = Image.open(requests.get(url, stream=True).raw)
image<jupyter_output><empty_output><jupyter_text>Preprocess the image.<jupyter_code># prepare image for the model
encoding = image_processor(image.convert("RGB"), return_tensors="pt")
print(encoding.pixel_values.shape)<jupyter_output>torch.Size([1, 3, 512, 512])<jupyter_text>Run an inference.<jupyter_code>with torch.no_grad():
outputs = inference_model(pixel_values=encoding.pixel_values)
logits = outputs.logits
upsampled_logits = nn.functional.interpolate(
logits,
size=image.size[::-1],
mode="bilinear",
align_corners=False,
)
pred_seg = upsampled_logits.argmax(dim=1)[0]<jupyter_output><empty_output><jupyter_text>Visualize the results.We need a color palette to visualize the results. Here, we use [one provided by the TensorFlow Model Garden repository](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.pyL51).<jupyter_code>def ade_palette():
"""Creates a label colormap used in ADE20K segmentation benchmark.
Returns:
A colormap for visualizing segmentation results.
"""
return np.asarray(
[
[0, 0, 0],
[120, 120, 120],
[180, 120, 120],
[6, 230, 230],
[80, 50, 50],
[4, 200, 3],
[120, 120, 80],
[140, 140, 140],
[204, 5, 255],
[230, 230, 230],
[4, 250, 7],
[224, 5, 255],
[235, 255, 7],
[150, 5, 61],
[120, 120, 70],
[8, 255, 51],
[255, 6, 82],
[143, 255, 140],
[204, 255, 4],
[255, 51, 7],
[204, 70, 3],
[0, 102, 200],
[61, 230, 250],
[255, 6, 51],
[11, 102, 255],
[255, 7, 71],
[255, 9, 224],
[9, 7, 230],
[220, 220, 220],
[255, 9, 92],
[112, 9, 255],
[8, 255, 214],
[7, 255, 224],
[255, 184, 6],
[10, 255, 71],
[255, 41, 10],
[7, 255, 255],
[224, 255, 8],
[102, 8, 255],
[255, 61, 6],
[255, 194, 7],
[255, 122, 8],
[0, 255, 20],
[255, 8, 41],
[255, 5, 153],
[6, 51, 255],
[235, 12, 255],
[160, 150, 20],
[0, 163, 255],
[140, 140, 140],
[250, 10, 15],
[20, 255, 0],
[31, 255, 0],
[255, 31, 0],
[255, 224, 0],
[153, 255, 0],
[0, 0, 255],
[255, 71, 0],
[0, 235, 255],
[0, 173, 255],
[31, 0, 255],
[11, 200, 200],
[255, 82, 0],
[0, 255, 245],
[0, 61, 255],
[0, 255, 112],
[0, 255, 133],
[255, 0, 0],
[255, 163, 0],
[255, 102, 0],
[194, 255, 0],
[0, 143, 255],
[51, 255, 0],
[0, 82, 255],
[0, 255, 41],
[0, 255, 173],
[10, 0, 255],
[173, 255, 0],
[0, 255, 153],
[255, 92, 0],
[255, 0, 255],
[255, 0, 245],
[255, 0, 102],
[255, 173, 0],
[255, 0, 20],
[255, 184, 184],
[0, 31, 255],
[0, 255, 61],
[0, 71, 255],
[255, 0, 204],
[0, 255, 194],
[0, 255, 82],
[0, 10, 255],
[0, 112, 255],
[51, 0, 255],
[0, 194, 255],
[0, 122, 255],
[0, 255, 163],
[255, 153, 0],
[0, 255, 10],
[255, 112, 0],
[143, 255, 0],
[82, 0, 255],
[163, 255, 0],
[255, 235, 0],
[8, 184, 170],
[133, 0, 255],
[0, 255, 92],
[184, 0, 255],
[255, 0, 31],
[0, 184, 255],
[0, 214, 255],
[255, 0, 112],
[92, 255, 0],
[0, 224, 255],
[112, 224, 255],
[70, 184, 160],
[163, 0, 255],
[153, 0, 255],
[71, 255, 0],
[255, 0, 163],
[255, 204, 0],
[255, 0, 143],
[0, 255, 235],
[133, 255, 0],
[255, 0, 235],
[245, 0, 255],
[255, 0, 122],
[255, 245, 0],
[10, 190, 212],
[214, 255, 0],
[0, 204, 255],
[20, 0, 255],
[255, 255, 0],
[0, 153, 255],
[0, 41, 255],
[0, 255, 204],
[41, 0, 255],
[41, 255, 0],
[173, 0, 255],
[0, 245, 255],
[71, 0, 255],
[122, 0, 255],
[0, 255, 184],
[0, 92, 255],
[184, 255, 0],
[0, 133, 255],
[255, 214, 0],
[25, 194, 194],
[102, 255, 0],
[92, 0, 255],
]
)
import matplotlib.pyplot as plt
color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
palette = np.array(ade_palette())
for label, color in enumerate(palette):
color_seg[pred_seg == label, :] = color
color_seg = color_seg[..., ::-1] # convert to BGR
img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map
img = img.astype(np.uint8)
plt.figure(figsize=(15, 10))
plt.imshow(img)
plt.show()<jupyter_output><empty_output> | peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb/0 | {
"file_path": "peft/examples/semantic_segmentation/semantic_segmentation_peft_lora.ipynb",
"repo_id": "peft",
"token_count": 8322
} | 343 |
python train.py \
--seed 100 \
--model_name_or_path "mistralai/Mistral-7B-v0.1" \
--dataset_name "smangrul/ultrachat-10k-chatml" \
--chat_template_format "chatml" \
--add_special_tokens False \
--append_concat_token False \
--splits "train,test" \
--max_seq_len 2048 \
--num_train_epochs 1 \
--logging_steps 5 \
--log_level "info" \
--logging_strategy "steps" \
--evaluation_strategy "epoch" \
--save_strategy "epoch" \
--push_to_hub \
--hub_private_repo True \
--hub_strategy "every_save" \
--bf16 True \
--packing True \
--learning_rate 1e-4 \
--lr_scheduler_type "cosine" \
--weight_decay 1e-4 \
--warmup_ratio 0.0 \
--max_grad_norm 1.0 \
--output_dir "mistral-sft-lora" \
--per_device_train_batch_size 8 \
--per_device_eval_batch_size 8 \
--gradient_accumulation_steps 8 \
--gradient_checkpointing True \
--use_reentrant True \
--dataset_text_field "content" \
--use_peft_lora True \
--lora_r 8 \
--lora_alpha 16 \
--lora_dropout 0.1 \
--lora_target_modules "all-linear" \
--use_4bit_quantization True \
--use_nested_quant True \
--bnb_4bit_compute_dtype "bfloat16" \
--use_flash_attn True
| peft/examples/sft/run_peft.sh/0 | {
"file_path": "peft/examples/sft/run_peft.sh",
"repo_id": "peft",
"token_count": 458
} | 344 |
import argparse
import json
import os
from datetime import date
from pathlib import Path
from tabulate import tabulate
MAX_LEN_MESSAGE = 2900 # slack endpoint has a limit of 3001 characters
parser = argparse.ArgumentParser()
parser.add_argument(
"--slack_channel_name",
default="peft-ci-daily",
)
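# Each "*.log" file is expected to contain one JSON record per line (for example the
# output of pytest's --report-log option), with fields such as "nodeid", "duration"
# and "outcome" that are parsed below.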
def main(slack_channel_name=None):
failed = []
passed = []
group_info = []
total_num_failed = 0
    empty_file = len(list(Path().glob("*.log"))) == 0
total_empty_files = []
for log in Path().glob("*.log"):
section_num_failed = 0
i = 0
with open(log) as f:
for line in f:
line = json.loads(line)
i += 1
if line.get("nodeid", "") != "":
test = line["nodeid"]
if line.get("duration", None) is not None:
duration = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
else:
passed.append([test, duration, log.name.split("_")[0]])
empty_file = i == 0
group_info.append([str(log), section_num_failed, failed])
total_empty_files.append(empty_file)
os.remove(log)
failed = []
text = (
"🌞 There were no failures!"
if not any(total_empty_files)
else "Something went wrong there is at least one empty file - please check GH action results."
)
no_error_payload = {
"type": "section",
"text": {
"type": "plain_text",
"text": text,
"emoji": True,
},
}
message = ""
payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": "🤗 Results of the {} PEFT scheduled tests.".format(os.environ.get("TEST_TYPE", "")),
},
},
]
if total_num_failed > 0:
for i, (name, num_failed, failed_tests) in enumerate(group_info):
if num_failed > 0:
if num_failed == 1:
message += f"*{name}: {num_failed} failed test*\n"
else:
message += f"*{name}: {num_failed} failed tests*\n"
failed_table = []
for test in failed_tests:
failed_table.append(test[0].split("::"))
failed_table = tabulate(
failed_table,
headers=["Test Location", "Test Case", "Test Name"],
showindex="always",
tablefmt="grid",
maxcolwidths=[12, 12, 12],
)
message += "\n```\n" + failed_table + "\n```"
if total_empty_files[i]:
message += f"\n*{name}: Warning! Empty file - please check the GitHub action job *\n"
print(f"### {message}")
else:
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
if len(message) > MAX_LEN_MESSAGE:
print(f"Truncating long message from {len(message)} to {MAX_LEN_MESSAGE}")
message = message[:MAX_LEN_MESSAGE] + "..."
if len(message) != 0:
md_report = {
"type": "section",
"text": {"type": "mrkdwn", "text": message},
}
payload.append(md_report)
action_button = {
"type": "section",
"text": {"type": "mrkdwn", "text": "*For more details:*"},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"https://github.com/huggingface/peft/actions/runs/{os.environ['GITHUB_RUN_ID']}",
},
}
payload.append(action_button)
date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
},
],
}
payload.append(date_report)
print(payload)
client = WebClient(token=os.environ.get("SLACK_API_TOKEN"))
client.chat_postMessage(channel=f"#{slack_channel_name}", text=message, blocks=payload)
if __name__ == "__main__":
args = parser.parse_args()
main(args.slack_channel_name)
| peft/scripts/log_reports.py/0 | {
"file_path": "peft/scripts/log_reports.py",
"repo_id": "peft",
"token_count": 2521
} | 345 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from .layer import AdaLoraLayer
class SVDQuantLinear(torch.nn.Module, AdaLoraLayer):
def __init__(
self,
base_layer,
adapter_name,
r: int = 0,
lora_alpha: int = 1,
lora_dropout: float = 0.0,
init_lora_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
AdaLoraLayer.__init__(self, base_layer)
# self.base_layer and self.quant_linear_module are the same; we need the former for consistency and the latter
# for backwards compatibility
self.quant_linear_module = base_layer
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, lora_alpha, lora_dropout, init_lora_weights)
def forward(self, x: torch.Tensor) -> torch.Tensor:
result = self.quant_linear_module(x)
if self.disable_adapters:
return result
for active_adapter in self.active_adapters:
if active_adapter not in self.lora_A.keys():
continue
lora_A = self.lora_A[active_adapter]
lora_B = self.lora_B[active_adapter]
lora_E = self.lora_E[active_adapter]
dropout = self.lora_dropout[active_adapter]
scaling = self.scaling[active_adapter]
ranknum = self.ranknum[active_adapter] + 1e-5
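            # AdaLoRA parametrizes the update as an SVD-like product: the adapter
            # contribution is dropout(x) @ (A * E)^T @ B^T, where E holds the
            # (pruned) singular values; it is multiplied by `scaling` and divided by
            # the current rank estimate `ranknum`.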
requires_conversion = not torch.is_autocast_enabled()
if requires_conversion:
expected_dtype = result.dtype
if x.dtype != torch.float32:
x = x.float()
output = (dropout(x) @ (lora_A * lora_E).T @ lora_B.T) * scaling / ranknum
# TODO: here, the dtype conversion is applied on the *whole expression*,
# not the intermediate result, unlike for SVDLinear8bitLT and
# SVDLinear4bit, is that correct?
if requires_conversion:
output = output.to(expected_dtype)
result += output
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "adalora." + rep
| peft/src/peft/tuners/adalora/gptq.py/0 | {
"file_path": "peft/src/peft/tuners/adalora/gptq.py",
"repo_id": "peft",
"token_count": 1173
} | 346 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any
import torch
from peft.import_utils import is_bnb_4bit_available, is_bnb_available
from .layer import IA3Layer
if is_bnb_available():
class Linear8bitLt(torch.nn.Module, IA3Layer):
# (IA)^3 implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
is_feedforward: bool,
init_ia3_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
# note: no check for self.merged because merging is not supported (yet)
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
if requires_conversion:
x = x.float()
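            # (IA)^3 learns a single scaling vector per adapted module: for
            # feed-forward modules it rescales the layer *input*, otherwise it
            # rescales the layer *output*.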
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "ia3." + rep
if is_bnb_4bit_available():
class Linear4bit(torch.nn.Module, IA3Layer):
# IA3 implemented in a dense layer
def __init__(
self,
base_layer: torch.nn.Module,
adapter_name: str,
is_feedforward: bool,
init_ia3_weights: bool = True,
**kwargs,
) -> None:
super().__init__()
IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward)
# Freezing the pre-trained weight matrix
self.get_base_layer().weight.requires_grad = False
self._active_adapter = adapter_name
self.update_layer(adapter_name, init_ia3_weights)
def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor:
# note: no check for self.merged because merging is not supported (yet)
if self.disable_adapters:
return self.base_layer(x)
ia3_scaling = 1
for active_adapter in self.active_adapters:
if active_adapter not in self.ia3_l.keys():
continue
ia3_scaling *= self.ia3_l[active_adapter].flatten()
requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32)
if requires_conversion:
x = x.float()
if self.is_feedforward:
result = self.base_layer(x * ia3_scaling)
expected_dtype = result.dtype
else:
result = self.base_layer(x)
expected_dtype = result.dtype
result = result * ia3_scaling
result = result.clone()
# adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch.
# This has been duplicated here.
if requires_conversion:
result = result.to(expected_dtype)
return result
def __repr__(self) -> str:
rep = super().__repr__()
return "ia3." + rep
| peft/src/peft/tuners/ia3/bnb.py/0 | {
"file_path": "peft/src/peft/tuners/ia3/bnb.py",
"repo_id": "peft",
"token_count": 2193
} | 347 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from peft.import_utils import is_bnb_4bit_available, is_bnb_available, is_eetq_available
from .config import LoftQConfig, LoraConfig
from .gptq import QuantLinear
from .layer import Conv2d, Embedding, Linear, LoraLayer
from .model import LoraModel
__all__ = ["LoraConfig", "LoftQConfig", "Conv2d", "Embedding", "LoraLayer", "Linear", "LoraModel", "QuantLinear"]
def __getattr__(name):
if (name == "Linear8bitLt") and is_bnb_available():
from .bnb import Linear8bitLt
return Linear8bitLt
if (name == "Linear4bit") and is_bnb_4bit_available():
from .bnb import Linear4bit
return Linear4bit
if (name == "EetqLoraLinear") and is_eetq_available():
from .eetq import EetqLoraLinear
return EetqLoraLinear
raise AttributeError(f"module {__name__} has no attribute {name}")
| peft/src/peft/tuners/lora/__init__.py/0 | {
"file_path": "peft/src/peft/tuners/lora/__init__.py",
"repo_id": "peft",
"token_count": 484
} | 348 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from peft.tuners.prompt_tuning import PromptEmbedding
from peft.utils import TaskType
from .config import MultitaskPromptTuningConfig, MultitaskPromptTuningInit
# This code is adapted for the paper: https://arxiv.org/abs/2303.02861 and
# constitutes the work done at MIT-IBM Watson Research Lab.
class MultitaskPromptEmbedding(PromptEmbedding):
def __init__(self, config: MultitaskPromptTuningConfig, word_embeddings):
super().__init__(config, word_embeddings)
self.num_tasks = config.num_tasks
self.num_ranks = config.num_ranks
self.num_virtual_tokens = config.num_virtual_tokens
self.num_transformer_submodules = config.num_transformer_submodules
if self.num_transformer_submodules is None:
self.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1
self.token_dim = config.token_dim
total_virtual_tokens = self.num_virtual_tokens * self.num_transformer_submodules
self.prefix_task_cols = torch.nn.Parameter(
torch.normal(
mean=0,
std=0.02,
size=(self.num_tasks, total_virtual_tokens, self.num_ranks),
)
)
self.prefix_task_rows = torch.nn.Parameter(
torch.normal(
mean=0,
std=0.02,
size=(self.num_tasks, self.num_ranks, self.token_dim),
)
)
if config.prompt_tuning_init in [
MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
MultitaskPromptTuningInit.ONLY_SOURCE_SHARED,
]:
if config.prompt_tuning_init_state_dict_path is None:
raise ValueError(
f"prompt_tuning_init_state_dict_path needs to be specified with {config.prompt_tuning_init} "
"init method"
)
if config.prompt_tuning_init_state_dict_path.endswith(".safetensors"):
from safetensors.torch import load_file
state_dict: dict = load_file(config.prompt_tuning_init_state_dict_path)
else:
state_dict: dict = torch.load(
config.prompt_tuning_init_state_dict_path,
map_location=word_embeddings.weight.device,
)
if config.prompt_tuning_init in [
MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS,
MultitaskPromptTuningInit.EXACT_SOURCE_TASK,
]:
prefix_task_cols_: torch.Tensor = state_dict["prefix_task_cols"]
prefix_task_rows_: torch.Tensor = state_dict["prefix_task_rows"]
if config.prompt_tuning_init == MultitaskPromptTuningInit.AVERAGE_SOURCE_TASKS:
prefix_task_cols_ = prefix_task_cols_.mean(0, keepdim=True)
prefix_task_rows_ = prefix_task_rows_.mean(0, keepdim=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.EXACT_SOURCE_TASK:
prefix_task_cols_ = prefix_task_cols_[config.prompt_tuning_init_task, ...].unsqueeze(0)
prefix_task_rows_ = prefix_task_rows_[config.prompt_tuning_init_task, ...].unsqueeze(0)
state_dict = {
"embedding.weight": state_dict["prompt_embeddings"],
"prefix_task_cols": prefix_task_cols_,
"prefix_task_rows": prefix_task_rows_,
}
self.load_state_dict(state_dict, strict=True)
elif config.prompt_tuning_init == MultitaskPromptTuningInit.ONLY_SOURCE_SHARED:
state_dict = {
"embedding.weight": state_dict["prompt_embeddings"],
}
self.load_state_dict(state_dict, strict=False)
def forward(self, indices, task_ids):
if task_ids is None:
raise ValueError("task_ids cannot be None")
prompt_embeddings = self.embedding(indices)
task_cols = torch.index_select(self.prefix_task_cols, 0, task_ids)
task_rows = torch.index_select(self.prefix_task_rows, 0, task_ids)
task_prompts = torch.matmul(task_cols, task_rows)
prompt_embeddings *= task_prompts
return prompt_embeddings
| peft/src/peft/tuners/multitask_prompt_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/multitask_prompt_tuning/model.py",
"repo_id": "peft",
"token_count": 2235
} | 349 |
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
import os
import warnings
from typing import Optional
import torch
from huggingface_hub import file_exists, hf_hub_download
from huggingface_hub.utils import EntryNotFoundError
from safetensors.torch import load_file as safe_load_file
from .other import (
EMBEDDING_LAYER_NAMES,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
check_file_exists_on_hf_hub,
infer_device,
)
from .peft_types import PeftType
def has_valid_embedding_base_layer(layer):
"""Check if the layer has an embedding base layer"""
return hasattr(layer, "base_layer") and isinstance(layer.base_layer, (torch.nn.Linear, torch.nn.Embedding))
def get_embedding_layer_name(model, layer, is_embedding_in_target_modules):
"""Get the name of the embedding module for a given layer."""
for name, module in model.named_modules():
if (not is_embedding_in_target_modules and module == layer) or module == getattr(layer, "base_layer", None):
return name
return None
def get_peft_model_state_dict(
model, state_dict=None, adapter_name="default", unwrap_compiled=False, save_embedding_layers="auto"
):
"""
Get the state dict of the Peft model.
Args:
model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,
the model should be the underlying model/unwrapped model (i.e. model.module).
state_dict (`dict`, *optional*, defaults to `None`):
The state dict of the model. If not provided, the state dict of the passed model will be used.
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter whose state dict should be returned.
unwrap_compiled (`bool`, *optional*, defaults to `False`):
Whether to unwrap the model if torch.compile was used.
        save_embedding_layers (`Union[bool, str]`, *optional*, defaults to `auto`):
If `True`, save the embedding layers in addition to adapter weights. If `auto`, checks the common embedding
layers `peft.utils.other.EMBEDDING_LAYER_NAMES` in config's `target_modules` when available. Based on it
sets the boolean flag. This only works for 🤗 transformers models.
"""
if unwrap_compiled:
model = getattr(model, "_orig_mod", model)
config = model.peft_config[adapter_name]
if state_dict is None:
state_dict = model.state_dict()
if config.peft_type in (PeftType.LORA, PeftType.ADALORA):
# to_return = lora_state_dict(model, bias=model.peft_config.bias)
# adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`
# to be used directly with the state dict which is necessary when using DeepSpeed or FSDP
bias = config.bias
if bias == "none":
to_return = {k: state_dict[k] for k in state_dict if "lora_" in k}
elif bias == "all":
to_return = {k: state_dict[k] for k in state_dict if "lora_" in k or "bias" in k}
elif bias == "lora_only":
to_return = {}
for k in state_dict:
if "lora_" in k:
to_return[k] = state_dict[k]
bias_name = k.split("lora_")[0] + "bias"
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
to_return = {k: v for k, v in to_return.items() if (("lora_" in k and adapter_name in k) or ("bias" in k))}
if config.peft_type == PeftType.ADALORA:
rank_pattern = config.rank_pattern
if rank_pattern is not None:
rank_pattern = {k.replace(f".{adapter_name}", ""): v for k, v in rank_pattern.items()}
config.rank_pattern = rank_pattern
to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)
elif config.peft_type == PeftType.BOFT:
bias = config.bias
if bias == "none":
to_return = {k: state_dict[k] for k in state_dict if "boft_" in k}
elif bias == "all":
to_return = {k: state_dict[k] for k in state_dict if "boft_" in k or "bias" in k}
elif bias == "boft_only":
to_return = {}
for k in state_dict:
if "boft_" in k:
to_return[k] = state_dict[k]
bias_name = k.split("boft_")[0] + "bias"
if bias_name in state_dict:
to_return[bias_name] = state_dict[bias_name]
else:
raise NotImplementedError
elif config.peft_type == PeftType.LOHA:
to_return = {k: state_dict[k] for k in state_dict if "hada_" in k}
elif config.peft_type == PeftType.LOKR:
to_return = {k: state_dict[k] for k in state_dict if "lokr_" in k}
elif config.peft_type == PeftType.ADAPTION_PROMPT:
to_return = {k: state_dict[k] for k in state_dict if k.split(".")[-1].startswith("adaption_")}
elif config.is_prompt_learning:
to_return = {}
if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
to_return["prefix_task_cols"] = model.prompt_encoder[adapter_name].prefix_task_cols
to_return["prefix_task_rows"] = model.prompt_encoder[adapter_name].prefix_task_rows
prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
else:
if config.inference_mode:
prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight
else:
prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)
to_return["prompt_embeddings"] = prompt_embeddings
elif config.peft_type == PeftType.IA3:
to_return = {k: state_dict[k] for k in state_dict if "ia3_" in k}
elif config.peft_type == PeftType.OFT:
to_return = {k: state_dict[k] for k in state_dict if "oft_" in k}
elif config.peft_type == PeftType.POLY:
to_return = {k: state_dict[k] for k in state_dict if "poly_" in k}
elif config.peft_type == PeftType.LN_TUNING:
to_return = {k: state_dict[k] for k in state_dict if "ln_tuning_" in k}
elif config.peft_type == PeftType.VERA:
to_return = {k: state_dict[k] for k in state_dict if "vera_lambda_" in k}
if config.save_projection:
# TODO: adding vera_A and vera_B to `self.get_base_layer` would
            # make the name to match here difficult to predict.
if f"base_model.vera_A.{adapter_name}" not in state_dict:
raise ValueError(
"Model was initialised to not save vera_A and vera_B but config now specifies to save projection!"
" Set `config.save_projection` to `False`."
)
to_return["base_model.vera_A." + adapter_name] = state_dict["base_model.vera_A." + adapter_name]
to_return["base_model.vera_B." + adapter_name] = state_dict["base_model.vera_B." + adapter_name]
else:
raise ValueError(f"Unknown PEFT type passed: {config.peft_type}")
if getattr(model, "modules_to_save", None) is not None:
for key, value in state_dict.items():
if any(f"{module_name}.modules_to_save.{adapter_name}" in key for module_name in model.modules_to_save):
to_return[key.replace("modules_to_save.", "")] = value
# check the common embedding layers in `target_modules` to reset `save_embedding_layers` if necessary
is_embedding_in_target_modules = False
if (
save_embedding_layers == "auto"
and hasattr(config, "target_modules")
and any(k in config.target_modules for k in EMBEDDING_LAYER_NAMES)
):
warnings.warn("Setting `save_embedding_layers` to `True` as embedding layers found in `target_modules`.")
save_embedding_layers = is_embedding_in_target_modules = True
elif save_embedding_layers == "auto":
vocab_size = getattr(getattr(model, "config", None), "vocab_size", None)
model_id = getattr(config, "base_model_name_or_path", None)
# For some models e.g. diffusers the text config file is stored in a subfolder
# we need to make sure we can download that config.
has_remote_config = False
# ensure that this check is not performed in HF offline mode, see #1452
if model_id is not None:
exists = check_file_exists_on_hf_hub(model_id, "config.json")
if exists is None:
# check failed, could not determine if it exists or not
warnings.warn(
f"Could not find a config file in {model_id} - will assume that the vocabulary was not modified."
)
has_remote_config = False
else:
has_remote_config = exists
# check if the vocab size of the base model is different from the vocab size of the finetuned model
if (
vocab_size
and model_id
and has_remote_config
and (vocab_size != model.config.__class__.from_pretrained(model_id).vocab_size)
):
warnings.warn(
"Setting `save_embedding_layers` to `True` as the embedding layer has been resized during finetuning."
)
save_embedding_layers = True
else:
save_embedding_layers = False
if save_embedding_layers and hasattr(model, "get_input_embeddings"):
for layer in [model.get_input_embeddings(), model.get_output_embeddings()]:
if not is_embedding_in_target_modules or has_valid_embedding_base_layer(layer):
# support from version >= 0.6.2
embedding_module_name = get_embedding_layer_name(model, layer, is_embedding_in_target_modules)
if embedding_module_name:
to_return.update({k: v for k, v in state_dict.items() if embedding_module_name in k})
elif save_embedding_layers:
warnings.warn("Could not identify embedding layer(s) because the model is not a 🤗 transformers model.")
to_return = {k.replace(f".{adapter_name}", ""): v for k, v in to_return.items()}
return to_return
def _find_mismatched_keys(
model: torch.nn.Module, peft_model_state_dict: dict[str, torch.Tensor], ignore_mismatched_sizes: bool = False
) -> tuple[dict[str, torch.Tensor], list[tuple[str, tuple[int, ...], tuple[int, ...]]]]:
if not ignore_mismatched_sizes:
return peft_model_state_dict, []
mismatched = []
state_dict = model.state_dict()
for key, tensor in peft_model_state_dict.items():
if key not in state_dict:
continue
# see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L3858-L3864
if (state_dict[key].shape[-1] == 1) and (state_dict[key].numel() * 2 == tensor.numel()):
# This skips size mismatches for 4-bit weights. Two 4-bit values share an 8-bit container, causing size
            # differences. Without matching on module type or parameter type, this seems like a practical way to
            # detect valid 4-bit weights.
continue
if state_dict[key].shape != tensor.shape:
mismatched.append((key, tensor.shape, state_dict[key].shape))
for key, _, _ in mismatched:
del peft_model_state_dict[key]
return peft_model_state_dict, mismatched
def set_peft_model_state_dict(
model, peft_model_state_dict, adapter_name="default", ignore_mismatched_sizes: bool = False
):
"""
Set the state dict of the Peft model.
Args:
model ([`PeftModel`]):
The Peft model.
peft_model_state_dict (`dict`):
The state dict of the Peft model.
adapter_name (`str`, *optional*, defaults to `"default"`):
The name of the adapter whose state dict should be set.
ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether to ignore mismatched sizes in the state dict.
"""
config = model.peft_config[adapter_name]
state_dict = {}
if getattr(model, "modules_to_save", None) is not None:
for key, value in peft_model_state_dict.items():
if any(module_name in key for module_name in model.modules_to_save):
for module_name in model.modules_to_save:
if module_name in key:
key = key.replace(module_name, f"{module_name}.modules_to_save.{adapter_name}")
break
state_dict[key] = value
else:
state_dict = peft_model_state_dict
if config.peft_type in (
PeftType.LORA,
PeftType.LOHA,
PeftType.LOKR,
PeftType.ADALORA,
PeftType.IA3,
PeftType.OFT,
PeftType.POLY,
PeftType.LN_TUNING,
PeftType.BOFT,
PeftType.VERA,
):
peft_model_state_dict = {}
parameter_prefix = {
PeftType.IA3: "ia3_",
PeftType.LORA: "lora_",
PeftType.ADALORA: "lora_",
PeftType.LOHA: "hada_",
PeftType.LOKR: "lokr_",
PeftType.OFT: "oft_",
PeftType.POLY: "poly_",
PeftType.BOFT: "boft_",
PeftType.LN_TUNING: "ln_tuning_",
PeftType.VERA: "vera_lambda_",
}[config.peft_type]
for k, v in state_dict.items():
if parameter_prefix in k:
suffix = k.split(parameter_prefix)[1]
if "." in suffix:
suffix_to_replace = ".".join(suffix.split(".")[1:])
k = k.replace(suffix_to_replace, f"{adapter_name}.{suffix_to_replace}")
else:
k = f"{k}.{adapter_name}"
peft_model_state_dict[k] = v
else:
peft_model_state_dict[k] = v
if config.peft_type == PeftType.ADALORA:
rank_pattern = config.rank_pattern
if rank_pattern is not None:
model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)
elif config.peft_type == PeftType.VERA:
if config.save_projection and "base_model.vera_A" not in peft_model_state_dict:
raise ValueError(
"Specified to load vera_A and vera_B from state dictionary however they were not present!"
)
elif not config.save_projection and "base_model.vera_A" in peft_model_state_dict:
warnings.warn(
"Specified to not load vera_A and vera_B from state dictionary however they are present in state"
" dictionary! Consider using them to ensure checkpoint loading is correct on all platforms using"
" `peft_config.save_projection = True`"
)
elif not config.save_projection: # and no vera_A in state dictionary
warnings.warn(
"Specified to not load vera_A and vera_B from state dictionary. This means we will be relying on"
" PRNG initialisation to restore these projections using `config.projection_prng_key`, which may"
" not be accurate on all system configurations."
)
elif config.is_prompt_learning or config.peft_type == PeftType.ADAPTION_PROMPT:
peft_model_state_dict = state_dict
else:
raise NotImplementedError
peft_model_state_dict, mismatched_keys = _find_mismatched_keys(
model, peft_model_state_dict, ignore_mismatched_sizes=ignore_mismatched_sizes
)
load_result = model.load_state_dict(peft_model_state_dict, strict=False)
if config.is_prompt_learning:
model.prompt_encoder[adapter_name].embedding.load_state_dict(
{"weight": peft_model_state_dict["prompt_embeddings"]}, strict=True
)
if config.peft_type == PeftType.MULTITASK_PROMPT_TUNING:
model.prompt_encoder[adapter_name].load_state_dict(peft_model_state_dict, strict=False)
if mismatched_keys:
# see https://github.com/huggingface/transformers/blob/09f9f566de83eef1f13ee83b5a1bbeebde5c80c1/src/transformers/modeling_utils.py#L4039
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
msg = (
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint "
f"and are being ignored because you passed `ignore_mismatched_sizes=True`: {mismatched_warning}."
)
warnings.warn(msg)
return load_result
def load_peft_weights(model_id: str, device: Optional[str] = None, **hf_hub_download_kwargs) -> dict:
r"""
A helper method to load the PEFT weights from the HuggingFace Hub or locally
Args:
model_id (`str`):
The local path to the adapter weights or the name of the adapter to load from the HuggingFace Hub.
device (`str`):
The device to load the weights onto.
hf_hub_download_kwargs (`dict`):
Additional arguments to pass to the `hf_hub_download` method when loading from the HuggingFace Hub.
"""
path = (
os.path.join(model_id, hf_hub_download_kwargs["subfolder"])
if hf_hub_download_kwargs.get("subfolder", None) is not None
else model_id
)
if device is None:
device = infer_device()
if os.path.exists(os.path.join(path, SAFETENSORS_WEIGHTS_NAME)):
filename = os.path.join(path, SAFETENSORS_WEIGHTS_NAME)
use_safetensors = True
elif os.path.exists(os.path.join(path, WEIGHTS_NAME)):
filename = os.path.join(path, WEIGHTS_NAME)
use_safetensors = False
else:
token = hf_hub_download_kwargs.get("token", None)
if token is None:
token = hf_hub_download_kwargs.get("use_auth_token", None)
hub_filename = (
os.path.join(hf_hub_download_kwargs["subfolder"], SAFETENSORS_WEIGHTS_NAME)
if hf_hub_download_kwargs.get("subfolder", None) is not None
else SAFETENSORS_WEIGHTS_NAME
)
has_remote_safetensors_file = file_exists(
repo_id=model_id,
filename=hub_filename,
revision=hf_hub_download_kwargs.get("revision", None),
repo_type=hf_hub_download_kwargs.get("repo_type", None),
token=token,
)
use_safetensors = has_remote_safetensors_file
if has_remote_safetensors_file:
# Priority 1: load safetensors weights
filename = hf_hub_download(
model_id,
SAFETENSORS_WEIGHTS_NAME,
**hf_hub_download_kwargs,
)
else:
try:
filename = hf_hub_download(model_id, WEIGHTS_NAME, **hf_hub_download_kwargs)
except EntryNotFoundError:
raise ValueError(
f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. "
f"Please check that the file {WEIGHTS_NAME} or {SAFETENSORS_WEIGHTS_NAME} is present at {model_id}."
)
if use_safetensors:
if hasattr(torch.backends, "mps") and (device == torch.device("mps")):
adapters_weights = safe_load_file(filename, device="cpu")
else:
adapters_weights = safe_load_file(filename, device=device)
else:
adapters_weights = torch.load(filename, map_location=torch.device(device))
return adapters_weights
| peft/src/peft/utils/save_and_load.py/0 | {
"file_path": "peft/src/peft/utils/save_and_load.py",
"repo_id": "peft",
"token_count": 9207
} | 350 |
#!/usr/bin/env python3
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib
import os
import unittest
import torch
import torch.nn.init as init
from peft import LoraConfig, PeftModel, get_peft_model, get_peft_model_state_dict
def is_megatron_available() -> bool:
return importlib.util.find_spec("megatron") is not None
if is_megatron_available():
from megatron.core import parallel_state, tensor_parallel
from megatron.core.tensor_parallel.random import model_parallel_cuda_manual_seed
from megatron.core.transformer.module import MegatronModule
from megatron.core.transformer.transformer_config import TransformerConfig
world_size = 1
rank = 0
def initialize_distributed():
print(f"Initializing torch.distributed with rank: {rank}, world_size: {world_size}")
torch.cuda.set_device(0)
init_method = "tcp://"
master_ip = os.getenv("MASTER_ADDR", "localhost")
master_port = os.getenv("MASTER_PORT", "6001")
init_method += master_ip + ":" + master_port
torch.distributed.init_process_group(backend="nccl", world_size=world_size, rank=rank, init_method=init_method)
def destroy_model_parallel():
parallel_state.destroy_model_parallel()
torch.distributed.barrier()
def initialize_model_parallel(
tensor_model_parallel_size=1,
pipeline_model_parallel_size=1,
virtual_pipeline_model_parallel_size=None,
pipeline_model_parallel_split_rank=None,
):
parallel_state.destroy_model_parallel()
if not torch.distributed.is_initialized():
initialize_distributed()
parallel_state.initialize_model_parallel(
tensor_model_parallel_size,
pipeline_model_parallel_size,
virtual_pipeline_model_parallel_size,
pipeline_model_parallel_split_rank,
)
class DummyModule(MegatronModule):
def __init__(self, config: TransformerConfig):
super().__init__(config)
self.linear = tensor_parallel.ColumnParallelLinear(
input_size=10,
output_size=10,
config=config,
init_method=init.xavier_normal_,
bias=False,
gather_output=False,
)
self.lm_head = tensor_parallel.RowParallelLinear(
input_size=10,
output_size=10,
config=config,
init_method=init.xavier_normal_,
bias=False,
input_is_parallel=True,
skip_bias_add=True,
)
def forward(self, input):
x = self.linear(input)[0]
x = self.lm_head(x)[0]
return x
class TestMegatronLora(unittest.TestCase):
def setUp(self):
initialize_model_parallel(1, 1)
model_parallel_cuda_manual_seed(123)
transformer_config = {
"num_layers": 2,
"hidden_size": 12,
"num_attention_heads": 4,
"use_cpu_initialization": True,
}
config = TransformerConfig(**transformer_config)
self.megatron_module = DummyModule(config=config).cuda()
self.dummy_module = copy.deepcopy(self.megatron_module).cuda()
lora_config = LoraConfig(
lora_alpha=16,
lora_dropout=0.1,
r=64,
bias="none",
target_modules=["linear", "lm_head"],
megatron_config=config,
megatron_core="megatron.core",
)
self.megatron_module = get_peft_model(self.megatron_module, lora_config)
def tearDown(self):
destroy_model_parallel()
def test_megatron_lora_module(self):
megatron_module = self.megatron_module
assert isinstance(megatron_module, PeftModel)
for name, module in megatron_module.named_modules():
if name.endswith("linear"):
assert hasattr(module, "lora_A")
assert hasattr(module, "lora_B")
if name.endswith("linear.lora_A.default"):
assert isinstance(module, torch.nn.Linear)
if name.endswith("linear.lora_B.default"):
assert isinstance(module, tensor_parallel.ColumnParallelLinear)
if name.endswith("lm_head.lora_A.default"):
assert isinstance(module, tensor_parallel.RowParallelLinear)
if name.endswith("lm_head.lora_B.default"):
assert isinstance(module, torch.nn.Linear)
def test_forward(self):
x = torch.ones((2, 4, 10)).cuda()
megatron_module_result = self.megatron_module(x)
        dummy_module_result = self.dummy_module(x)
        # Because lora_B is initialized with 0, the forward results of the two models should be equal before backward.
        assert megatron_module_result.equal(dummy_module_result)
def test_backward(self):
optimizer = torch.optim.AdamW(self.megatron_module.parameters())
loss_fn = torch.nn.CrossEntropyLoss()
x = torch.randn(2, 4, 10, requires_grad=True).cuda()
label = torch.randint(10, (2 * 4,)).cuda()
output = self.megatron_module(x)
output = output.reshape(2 * 4, 10)
loss = loss_fn(output, label)
loss.backward()
optimizer.step()
def test_get_peft_model_state_dict(self):
peft_state_dict = get_peft_model_state_dict(self.megatron_module)
for key in peft_state_dict.keys():
assert "lora" in key
| peft/tests/test_lora_megatron.py/0 | {
"file_path": "peft/tests/test_lora_megatron.py",
"repo_id": "peft",
"token_count": 2964
} | 351 |
title: Model Pages | pytorch-image-models/docs/models/.pages/0 | {
"file_path": "pytorch-image-models/docs/models/.pages",
"repo_id": "pytorch-image-models",
"token_count": 4
} | 352 |
# ESE-VoVNet
**VoVNet** is a convolutional neural network that seeks to make [DenseNet](https://paperswithcode.com/method/densenet) more efficient by concatenating all features only once, in the last feature map, which keeps the input size constant and makes it possible to enlarge the output channel width.
Read about [one-shot aggregation here](https://paperswithcode.com/method/one-shot-aggregation).
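For intuition, one-shot aggregation can be sketched as a small PyTorch module. This is an illustrative example only (the layer count, channel widths and trailing 1x1 projection are arbitrary choices), not the exact `timm` implementation:
```python
import torch
import torch.nn as nn

class OSABlock(nn.Module):
    """Each conv feeds the next, but features are concatenated only once at the end,
    unlike DenseNet's repeated concatenations."""
    def __init__(self, in_ch, mid_ch, out_ch, num_convs=5):
        super().__init__()
        self.convs = nn.ModuleList()
        ch = in_ch
        for _ in range(num_convs):
            self.convs.append(nn.Sequential(
                nn.Conv2d(ch, mid_ch, 3, padding=1, bias=False),
                nn.BatchNorm2d(mid_ch),
                nn.ReLU(inplace=True),
            ))
            ch = mid_ch
        # one-shot aggregation: a single 1x1 conv over the concatenated feature maps
        self.concat_conv = nn.Conv2d(in_ch + num_convs * mid_ch, out_ch, kernel_size=1)

    def forward(self, x):
        feats = [x]
        for conv in self.convs:
            x = conv(x)
            feats.append(x)
        return self.concat_conv(torch.cat(feats, dim=1))
```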
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{lee2019energy,
title={An Energy and GPU-Computation Efficient Backbone Network for Real-Time Object Detection},
author={Youngwan Lee and Joong-won Hwang and Sangrok Lee and Yuseok Bae and Jongyoul Park},
year={2019},
eprint={1904.09730},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: ESE VovNet
Paper:
Title: 'CenterMask : Real-Time Anchor-Free Instance Segmentation'
URL: https://paperswithcode.com/paper/centermask-real-time-anchor-free-instance-1
Models:
- Name: ese_vovnet19b_dw
In Collection: ESE VovNet
Metadata:
FLOPs: 1711959904
Parameters: 6540000
File Size: 26243175
Architecture:
- Batch Normalization
- Convolution
- Max Pooling
- One-Shot Aggregation
- ReLU
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: ese_vovnet19b_dw
Layers: 19
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/vovnet.py#L361
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet19b_dw-a8741004.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.82%
Top 5 Accuracy: 93.28%
- Name: ese_vovnet39b
In Collection: ESE VovNet
Metadata:
FLOPs: 9089259008
Parameters: 24570000
File Size: 98397138
Architecture:
- Batch Normalization
- Convolution
- Max Pooling
- One-Shot Aggregation
- ReLU
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: ese_vovnet39b
Layers: 39
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/vovnet.py#L371
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/ese_vovnet39b-f912fe73.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.31%
Top 5 Accuracy: 94.72%
-->
| pytorch-image-models/docs/models/.templates/models/ese-vovnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/ese-vovnet.md",
"repo_id": "pytorch-image-models",
"token_count": 1127
} | 353 |
# MixNet
**MixNet** is a type of convolutional neural network discovered via AutoML that utilises [MixConvs](https://paperswithcode.com/method/mixconv) instead of regular [depthwise convolutions](https://paperswithcode.com/method/depthwise-convolution).
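As a rough illustration of the MixConv idea (the channel split sizes and kernel sizes below are arbitrary, and this is not the exact `timm` implementation):
```python
import torch
import torch.nn as nn

class MixConv(nn.Module):
    """Depthwise convolution with mixed kernel sizes: channels are split into groups
    and each group is convolved with a different kernel size."""
    def __init__(self, channels, kernel_sizes=(3, 5, 7)):
        super().__init__()
        splits = [channels // len(kernel_sizes)] * len(kernel_sizes)
        splits[0] += channels - sum(splits)  # absorb the remainder in the first group
        self.splits = splits
        self.convs = nn.ModuleList(
            nn.Conv2d(c, c, k, padding=k // 2, groups=c, bias=False)
            for c, k in zip(splits, kernel_sizes)
        )

    def forward(self, x):
        chunks = torch.split(x, self.splits, dim=1)
        return torch.cat([conv(chunk) for conv, chunk in zip(self.convs, chunks)], dim=1)
```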
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2019mixconv,
title={MixConv: Mixed Depthwise Convolutional Kernels},
author={Mingxing Tan and Quoc V. Le},
year={2019},
eprint={1907.09595},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: MixNet
Paper:
Title: 'MixConv: Mixed Depthwise Convolutional Kernels'
URL: https://paperswithcode.com/paper/mixnet-mixed-depthwise-convolutional-kernels
Models:
- Name: mixnet_l
In Collection: MixNet
Metadata:
FLOPs: 738671316
Parameters: 7330000
File Size: 29608232
Architecture:
- Batch Normalization
- Dense Connections
- Dropout
- Global Average Pooling
- Grouped Convolution
- MixConv
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- MNAS
Training Data:
- ImageNet
ID: mixnet_l
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1669
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_l-5a9a2ed8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.98%
Top 5 Accuracy: 94.18%
- Name: mixnet_m
In Collection: MixNet
Metadata:
FLOPs: 454543374
Parameters: 5010000
File Size: 20298347
Architecture:
- Batch Normalization
- Dense Connections
- Dropout
- Global Average Pooling
- Grouped Convolution
- MixConv
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- MNAS
Training Data:
- ImageNet
ID: mixnet_m
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1660
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_m-4647fc68.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.27%
Top 5 Accuracy: 93.42%
- Name: mixnet_s
In Collection: MixNet
Metadata:
FLOPs: 321264910
Parameters: 4130000
File Size: 16727982
Architecture:
- Batch Normalization
- Dense Connections
- Dropout
- Global Average Pooling
- Grouped Convolution
- MixConv
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- MNAS
Training Data:
- ImageNet
ID: mixnet_s
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1651
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_s-a907afbc.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.99%
Top 5 Accuracy: 92.79%
- Name: mixnet_xl
In Collection: MixNet
Metadata:
FLOPs: 1195880424
Parameters: 11900000
File Size: 48001170
Architecture:
- Batch Normalization
- Dense Connections
- Dropout
- Global Average Pooling
- Grouped Convolution
- MixConv
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- MNAS
Training Data:
- ImageNet
ID: mixnet_xl
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1678
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mixnet_xl_ra-aac3c00c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.47%
Top 5 Accuracy: 94.93%
-->
| pytorch-image-models/docs/models/.templates/models/mixnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/mixnet.md",
"repo_id": "pytorch-image-models",
"token_count": 1878
} | 354 |
# SE-ResNet
**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
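For reference, a squeeze-and-excitation block can be sketched as follows (an illustrative module with an arbitrary reduction ratio, not the exact `timm` implementation):
```python
import torch.nn as nn

class SEBlock(nn.Module):
    """Squeeze (global average pool), excite (bottleneck MLP + sigmoid),
    then rescale each channel of the input."""
    def __init__(self, channels, reduction=16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        b, c, _, _ = x.shape
        w = self.fc(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w
```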
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{hu2019squeezeandexcitation,
title={Squeeze-and-Excitation Networks},
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
year={2019},
eprint={1709.01507},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: SE ResNet
Paper:
Title: Squeeze-and-Excitation Networks
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: seresnet152d
In Collection: SE ResNet
Metadata:
FLOPs: 20161904304
Parameters: 66840000
File Size: 268144497
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: seresnet152d
LR: 0.6
Epochs: 100
Layers: 152
Dropout: 0.2
Crop Pct: '0.94'
Momentum: 0.9
Batch Size: 1024
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1206
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet152d_ra2-04464dd2.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.74%
Top 5 Accuracy: 96.77%
- Name: seresnet50
In Collection: SE ResNet
Metadata:
FLOPs: 5285062320
Parameters: 28090000
File Size: 112621903
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: seresnet50
LR: 0.6
Epochs: 100
Layers: 50
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1180
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet50_ra_224-8efdb4bb.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.26%
Top 5 Accuracy: 95.07%
-->
| pytorch-image-models/docs/models/.templates/models/se-resnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/se-resnet.md",
"repo_id": "pytorch-image-models",
"token_count": 1371
} | 355 |
# TResNet
A **TResNet** is a variant of a [ResNet](https://paperswithcode.com/method/resnet) that aims to boost accuracy while maintaining GPU training and inference efficiency. It contains several design tricks, including a SpaceToDepth stem, [Anti-Alias downsampling](https://paperswithcode.com/method/anti-alias-downsampling), In-Place Activated BatchNorm, block selection and [squeeze-and-excitation layers](https://paperswithcode.com/method/squeeze-and-excitation-block).
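The SpaceToDepth stem, for example, replaces the usual strided-convolution/max-pool stem with a lossless rearrangement of pixels into channels. A minimal sketch of the idea (block size of 4; this is illustrative, not the exact `timm` implementation):
```python
import torch
import torch.nn as nn

class SpaceToDepth(nn.Module):
    """Rearranges each 4x4 spatial block into 16 extra channels,
    reducing resolution by 4x without discarding information."""
    def __init__(self, block_size=4):
        super().__init__()
        self.bs = block_size

    def forward(self, x):
        b, c, h, w = x.shape
        x = x.view(b, c, h // self.bs, self.bs, w // self.bs, self.bs)
        x = x.permute(0, 3, 5, 1, 2, 4).contiguous()
        return x.view(b, c * self.bs * self.bs, h // self.bs, w // self.bs)
```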
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{ridnik2020tresnet,
title={TResNet: High Performance GPU-Dedicated Architecture},
author={Tal Ridnik and Hussam Lawen and Asaf Noy and Emanuel Ben Baruch and Gilad Sharir and Itamar Friedman},
year={2020},
eprint={2003.13630},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: TResNet
Paper:
Title: 'TResNet: High Performance GPU-Dedicated Architecture'
URL: https://paperswithcode.com/paper/tresnet-high-performance-gpu-dedicated
Models:
- Name: tresnet_l
In Collection: TResNet
Metadata:
FLOPs: 10873416792
Parameters: 53456696
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
ID: tresnet_l
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L267
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_81_5-235b486c.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.49%
Top 5 Accuracy: 95.62%
- Name: tresnet_l_448
In Collection: TResNet
Metadata:
FLOPs: 43488238584
Parameters: 53456696
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
ID: tresnet_l_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L285
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.26%
Top 5 Accuracy: 95.98%
- Name: tresnet_m
In Collection: TResNet
Metadata:
FLOPs: 5733048064
Parameters: 41282200
File Size: 125861314
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
Training Time: < 24 hours
ID: tresnet_m
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L261
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_80_8-dbc13962.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.8%
Top 5 Accuracy: 94.86%
- Name: tresnet_m_448
In Collection: TResNet
Metadata:
FLOPs: 22929743104
Parameters: 29278464
File Size: 125861314
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
ID: tresnet_m_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L279
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_m_448-bc359d10.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.72%
Top 5 Accuracy: 95.57%
- Name: tresnet_xl
In Collection: TResNet
Metadata:
FLOPs: 15162534034
Parameters: 75646610
File Size: 314378965
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
ID: tresnet_xl
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L273
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_xl_82_0-a2d51b00.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.05%
Top 5 Accuracy: 95.93%
- Name: tresnet_xl_448
In Collection: TResNet
Metadata:
FLOPs: 60641712730
Parameters: 75646610
File Size: 224440219
Architecture:
- 1x1 Convolution
- Anti-Alias Downsampling
- Convolution
- Global Average Pooling
- InPlace-ABN
- Leaky ReLU
- ReLU
- Residual Connection
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- AutoAugment
- Cutout
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
    Training Resources: 8x NVIDIA V100 GPUs
ID: tresnet_xl_448
LR: 0.01
Epochs: 300
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '448'
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/tresnet.py#L291
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tresnet/tresnet_l_448-940d0cd1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.06%
Top 5 Accuracy: 96.19%
-->
| pytorch-image-models/docs/models/.templates/models/tresnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/tresnet.md",
"repo_id": "pytorch-image-models",
"token_count": 3391
} | 356 |
# (Tensorflow) MobileNet v3
**MobileNetV3** is a convolutional neural network that is designed for mobile phone CPUs. The network design includes the use of a [hard swish activation](https://paperswithcode.com/method/hard-swish) and [squeeze-and-excitation](https://paperswithcode.com/method/squeeze-and-excitation-block) modules in the [MBConv blocks](https://paperswithcode.com/method/inverted-residual-block).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
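The hard swish activation used throughout the network is a piecewise-linear approximation of swish that is cheap to compute on mobile hardware. A minimal sketch of the function (recent PyTorch versions also ship a built-in `nn.Hardswish`):
```python
import torch
import torch.nn.functional as F

def hard_swish(x: torch.Tensor) -> torch.Tensor:
    # hard_swish(x) = x * relu6(x + 3) / 6
    return x * F.relu6(x + 3.0) / 6.0
```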
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_mobilenetv3_large_075`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('tf_mobilenetv3_large_075', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/abs-1905-02244,
author = {Andrew Howard and
Mark Sandler and
Grace Chu and
Liang{-}Chieh Chen and
Bo Chen and
Mingxing Tan and
Weijun Wang and
Yukun Zhu and
Ruoming Pang and
Vijay Vasudevan and
Quoc V. Le and
Hartwig Adam},
title = {Searching for MobileNetV3},
journal = {CoRR},
volume = {abs/1905.02244},
year = {2019},
url = {http://arxiv.org/abs/1905.02244},
archivePrefix = {arXiv},
eprint = {1905.02244},
timestamp = {Tue, 12 Jan 2021 15:30:06 +0100},
biburl = {https://dblp.org/rec/journals/corr/abs-1905-02244.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF MobileNet V3
Paper:
Title: Searching for MobileNetV3
URL: https://paperswithcode.com/paper/searching-for-mobilenetv3
Models:
- Name: tf_mobilenetv3_large_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 194323712
Parameters: 3990000
File Size: 16097377
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_075
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_075-150ee8b0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.45%
Top 5 Accuracy: 91.34%
- Name: tf_mobilenetv3_large_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 274535288
Parameters: 5480000
File Size: 22076649
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L403
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_100-427764d5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.51%
Top 5 Accuracy: 92.61%
- Name: tf_mobilenetv3_large_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 267216928
Parameters: 3920000
File Size: 15836368
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x4 TPU Pod
ID: tf_mobilenetv3_large_minimal_100
LR: 0.1
Dropout: 0.8
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L412
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_large_minimal_100-8596ae28.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.24%
Top 5 Accuracy: 90.64%
- Name: tf_mobilenetv3_small_075
In Collection: TF MobileNet V3
Metadata:
FLOPs: 48457664
Parameters: 2040000
File Size: 8242701
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_075
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L421
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_075-da427f52.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 65.72%
Top 5 Accuracy: 86.13%
- Name: tf_mobilenetv3_small_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 65450600
Parameters: 2540000
File Size: 10256398
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L430
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_100-37f49e2b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 67.92%
Top 5 Accuracy: 87.68%
- Name: tf_mobilenetv3_small_minimal_100
In Collection: TF MobileNet V3
Metadata:
FLOPs: 60827936
Parameters: 2040000
File Size: 8258083
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Hard Swish
- Inverted Residual Block
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 16x GPUs
ID: tf_mobilenetv3_small_minimal_100
LR: 0.045
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4096
Image Size: '224'
Weight Decay: 4.0e-05
Interpolation: bilinear
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/mobilenetv3.py#L439
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_mobilenetv3_small_minimal_100-922a7843.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 62.91%
Top 5 Accuracy: 84.24%
--> | pytorch-image-models/docs/models/tf-mobilenet-v3.md/0 | {
"file_path": "pytorch-image-models/docs/models/tf-mobilenet-v3.md",
"repo_id": "pytorch-image-models",
"token_count": 4778
} | 357 |
# AdvProp (EfficientNet)
**AdvProp** is an adversarial training scheme which treats adversarial examples as additional training examples, to prevent overfitting. Key to the method is the use of a separate auxiliary batch norm for adversarial examples, as they have underlying distributions different from those of normal examples.
The weights from this model were ported from [Tensorflow/TPU](https://github.com/tensorflow/tpu).
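The auxiliary batch norm idea can be sketched as follows; the module name and the `adversarial` flag below are illustrative only and not part of `timm`:
```py
import torch.nn as nn

class DualBatchNorm2d(nn.Module):
    """Keeps separate BatchNorm statistics for clean and adversarial examples."""
    def __init__(self, channels):
        super().__init__()
        self.bn_clean = nn.BatchNorm2d(channels)
        self.bn_adv = nn.BatchNorm2d(channels)

    def forward(self, x, adversarial=False):
        return self.bn_adv(x) if adversarial else self.bn_clean(x)
```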
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `tf_efficientnet_b0_ap`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
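For example, a feature-backbone variant of the same checkpoint can typically be created with `features_only=True` (a short sketch reusing the `tensor` prepared above):

```py
>>> feature_model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, features_only=True)
>>> feature_model.eval()
>>> with torch.no_grad():
...     features = feature_model(tensor)
>>> for f in features:
...     print(f.shape)  # one feature map per extracted stage
```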
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('tf_efficientnet_b0_ap', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{xie2020adversarial,
title={Adversarial Examples Improve Image Recognition},
author={Cihang Xie and Mingxing Tan and Boqing Gong and Jiang Wang and Alan Yuille and Quoc V. Le},
year={2020},
eprint={1911.09665},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: AdvProp
Paper:
Title: Adversarial Examples Improve Image Recognition
URL: https://paperswithcode.com/paper/adversarial-examples-improve-image
Models:
- Name: tf_efficientnet_b0_ap
In Collection: AdvProp
Metadata:
FLOPs: 488688572
Parameters: 5290000
File Size: 21385973
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b0_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 2048
Image Size: '224'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1334
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b0_ap-f262efe1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.1%
Top 5 Accuracy: 93.26%
- Name: tf_efficientnet_b1_ap
In Collection: AdvProp
Metadata:
FLOPs: 883633200
Parameters: 7790000
File Size: 31515350
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b1_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.882'
Momentum: 0.9
Batch Size: 2048
Image Size: '240'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1344
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b1_ap-44ef0a3d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.28%
Top 5 Accuracy: 94.3%
- Name: tf_efficientnet_b2_ap
In Collection: AdvProp
Metadata:
FLOPs: 1234321170
Parameters: 9110000
File Size: 36800745
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b2_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.89'
Momentum: 0.9
Batch Size: 2048
Image Size: '260'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1354
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b2_ap-2f8e7636.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.3%
Top 5 Accuracy: 95.03%
- Name: tf_efficientnet_b3_ap
In Collection: AdvProp
Metadata:
FLOPs: 2275247568
Parameters: 12230000
File Size: 49384538
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b3_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.904'
Momentum: 0.9
Batch Size: 2048
Image Size: '300'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1364
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b3_ap-aad25bdd.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 81.82%
Top 5 Accuracy: 95.62%
- Name: tf_efficientnet_b4_ap
In Collection: AdvProp
Metadata:
FLOPs: 5749638672
Parameters: 19340000
File Size: 77993585
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b4_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.922'
Momentum: 0.9
Batch Size: 2048
Image Size: '380'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1374
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b4_ap-dedb23e6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.26%
Top 5 Accuracy: 96.39%
- Name: tf_efficientnet_b5_ap
In Collection: AdvProp
Metadata:
FLOPs: 13176501888
Parameters: 30390000
File Size: 122403150
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b5_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.934'
Momentum: 0.9
Batch Size: 2048
Image Size: '456'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1384
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b5_ap-9e82fae8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.25%
Top 5 Accuracy: 96.97%
- Name: tf_efficientnet_b6_ap
In Collection: AdvProp
Metadata:
FLOPs: 24180518488
Parameters: 43040000
File Size: 173237466
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b6_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.942'
Momentum: 0.9
Batch Size: 2048
Image Size: '528'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1394
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b6_ap-4ffb161f.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 84.79%
Top 5 Accuracy: 97.14%
- Name: tf_efficientnet_b7_ap
In Collection: AdvProp
Metadata:
FLOPs: 48205304880
Parameters: 66349999
File Size: 266850607
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b7_ap
LR: 0.256
Epochs: 350
Crop Pct: '0.949'
Momentum: 0.9
Batch Size: 2048
Image Size: '600'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1405
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b7_ap-ddb28fec.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.12%
Top 5 Accuracy: 97.25%
- Name: tf_efficientnet_b8_ap
In Collection: AdvProp
Metadata:
FLOPs: 80962956270
Parameters: 87410000
File Size: 351412563
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Techniques:
- AdvProp
- AutoAugment
- Label Smoothing
- RMSProp
- Stochastic Depth
- Weight Decay
Training Data:
- ImageNet
ID: tf_efficientnet_b8_ap
LR: 0.128
Epochs: 350
Crop Pct: '0.954'
Momentum: 0.9
Batch Size: 2048
Image Size: '672'
Weight Decay: 1.0e-05
Interpolation: bicubic
RMSProp Decay: 0.9
Label Smoothing: 0.1
BatchNorm Momentum: 0.99
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L1416
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_efficientnet_b8_ap-00e169fa.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 85.37%
Top 5 Accuracy: 97.3%
--> | pytorch-image-models/hfdocs/source/models/advprop.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/advprop.mdx",
"repo_id": "pytorch-image-models",
"token_count": 6032
} | 358 |
# (Gluon) ResNeXt
A **ResNeXt** repeats a [building block](https://paperswithcode.com/method/resnext-block) that aggregates a set of transformations with the same topology. Compared to a [ResNet](https://paperswithcode.com/method/resnet), it exposes a new dimension, *cardinality* (the size of the set of transformations) \\( C \\), as an essential factor in addition to the dimensions of depth and width.
The weights from this model were ported from [Gluon](https://cv.gluon.ai/model_zoo/classification.html).
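The cardinality dimension maps directly onto a grouped convolution: each of the \\( C \\) groups applies the same transformation topology to its own slice of channels. The snippet below is only a conceptual sketch of a ResNeXt bottleneck (channel widths and names are illustrative, and the residual connection is omitted), not the exact block used by these weights:

```py
import torch.nn as nn

def resnext_bottleneck(in_chs=256, mid_chs=128, out_chs=256, cardinality=32):
    # cardinality C is expressed via the `groups` argument of the 3x3 convolution
    return nn.Sequential(
        nn.Conv2d(in_chs, mid_chs, 1, bias=False),
        nn.BatchNorm2d(mid_chs),
        nn.ReLU(inplace=True),
        nn.Conv2d(mid_chs, mid_chs, 3, padding=1, groups=cardinality, bias=False),
        nn.BatchNorm2d(mid_chs),
        nn.ReLU(inplace=True),
        nn.Conv2d(mid_chs, out_chs, 1, bias=False),
        nn.BatchNorm2d(out_chs),
    )
```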
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('gluon_resnext101_32x4d', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `gluon_resnext101_32x4d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('gluon_resnext101_32x4d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/XieGDTH16,
author = {Saining Xie and
Ross B. Girshick and
Piotr Doll{\'{a}}r and
Zhuowen Tu and
Kaiming He},
title = {Aggregated Residual Transformations for Deep Neural Networks},
journal = {CoRR},
volume = {abs/1611.05431},
year = {2016},
url = {http://arxiv.org/abs/1611.05431},
archivePrefix = {arXiv},
eprint = {1611.05431},
timestamp = {Mon, 13 Aug 2018 16:45:58 +0200},
biburl = {https://dblp.org/rec/journals/corr/XieGDTH16.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: Gluon ResNeXt
Paper:
Title: Aggregated Residual Transformations for Deep Neural Networks
URL: https://paperswithcode.com/paper/aggregated-residual-transformations-for-deep
Models:
- Name: gluon_resnext101_32x4d
  In Collection: Gluon ResNeXt
Metadata:
FLOPs: 10298145792
Parameters: 44180000
File Size: 177367414
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnext101_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L193
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_32x4d-b253c8c4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.33%
Top 5 Accuracy: 94.91%
- Name: gluon_resnext101_64x4d
  In Collection: Gluon ResNeXt
Metadata:
FLOPs: 19954172928
Parameters: 83460000
File Size: 334737852
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnext101_64x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L201
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext101_64x4d-f9a8e184.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.63%
Top 5 Accuracy: 95.0%
- Name: gluon_resnext50_32x4d
  In Collection: Gluon ResNeXt
Metadata:
FLOPs: 5472648192
Parameters: 25030000
File Size: 100441719
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Global Average Pooling
- Grouped Convolution
- Max Pooling
- ReLU
- ResNeXt Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: gluon_resnext50_32x4d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/gluon_resnet.py#L185
Weights: https://github.com/rwightman/pytorch-pretrained-gluonresnet/releases/download/v0.1/gluon_resnext50_32x4d-e6a097c1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.35%
Top 5 Accuracy: 94.42%
-->
| pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/gloun-resnext.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2709
} | 359 |
# Models
[[autodoc]] timm.create_model
[[autodoc]] timm.list_models
| pytorch-image-models/hfdocs/source/reference/models.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/reference/models.mdx",
"repo_id": "pytorch-image-models",
"token_count": 29
} | 360 |
import os
from typing import Optional
from .reader_image_folder import ReaderImageFolder
from .reader_image_in_tar import ReaderImageInTar
def create_reader(
    name: str,
    root: Optional[str] = None,
    split: str = 'train',
    **kwargs,
):
    """ Create a dataset reader.

    The reader backend is selected from an optional prefix in `name` ('hfds/',
    'hfids/', 'tfds/', 'wds/'); without a prefix, `root` is treated as an image
    folder or an image .tar file (backwards-compatible fallback).
    """
kwargs = {k: v for k, v in kwargs.items() if v is not None}
name = name.lower()
name = name.split('/', 1)
prefix = ''
if len(name) > 1:
prefix = name[0]
name = name[-1]
    # FIXME improve the selection; right now it's just prefix-based or the fallback path, will need
    # options to explicitly select other readers shortly
if prefix == 'hfds':
from .reader_hfds import ReaderHfds # defer Hf datasets import
reader = ReaderHfds(name=name, root=root, split=split, **kwargs)
elif prefix == 'hfids':
from .reader_hfids import ReaderHfids # defer HF datasets import
reader = ReaderHfids(name=name, root=root, split=split, **kwargs)
elif prefix == 'tfds':
from .reader_tfds import ReaderTfds # defer tensorflow import
reader = ReaderTfds(name=name, root=root, split=split, **kwargs)
elif prefix == 'wds':
from .reader_wds import ReaderWds
kwargs.pop('download', False)
reader = ReaderWds(root=root, name=name, split=split, **kwargs)
else:
assert os.path.exists(root)
# default fallback path (backwards compat), use image tar if root is a .tar file, otherwise image folder
# FIXME support split here or in reader?
if os.path.isfile(root) and os.path.splitext(root)[1] == '.tar':
reader = ReaderImageInTar(root, **kwargs)
else:
reader = ReaderImageFolder(root, **kwargs)
return reader
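# Illustrative usage (not part of the original module; dataset names and paths below are hypothetical):
#   create_reader('hfds/some-org/some-dataset', split='train')             # Hugging Face datasets backend
#   create_reader('tfds/some_dataset', root='/data/tfds', split='train')   # tensorflow-datasets backend
#   create_reader('', root='/data/imagenet/train')                         # image folder / .tar fallback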
| pytorch-image-models/timm/data/readers/reader_factory.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/reader_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 694
} | 361 |
""" PyTorch selectable adaptive pooling
Adaptive pooling with the ability to select the type of pooling from:
* 'avg' - Average pooling
* 'max' - Max pooling
* 'avgmax' - Sum of average and max pooling re-scaled by 0.5
* 'avgmaxc' - Concatenation of average and max pooling along feature dim, doubles feature dim
Both a functional and a nn.Module version of the pooling is provided.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from .format import get_spatial_dim, get_channel_dim
_int_tuple_2_t = Union[int, Tuple[int, int]]
def adaptive_pool_feat_mult(pool_type='avg'):
if pool_type.endswith('catavgmax'):
return 2
else:
return 1
def adaptive_avgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return 0.5 * (x_avg + x_max)
def adaptive_catavgmax_pool2d(x, output_size: _int_tuple_2_t = 1):
x_avg = F.adaptive_avg_pool2d(x, output_size)
x_max = F.adaptive_max_pool2d(x, output_size)
return torch.cat((x_avg, x_max), 1)
def select_adaptive_pool2d(x, pool_type='avg', output_size: _int_tuple_2_t = 1):
"""Selectable global pooling function with dynamic input kernel size
"""
if pool_type == 'avg':
x = F.adaptive_avg_pool2d(x, output_size)
elif pool_type == 'avgmax':
x = adaptive_avgmax_pool2d(x, output_size)
elif pool_type == 'catavgmax':
x = adaptive_catavgmax_pool2d(x, output_size)
elif pool_type == 'max':
x = F.adaptive_max_pool2d(x, output_size)
else:
assert False, 'Invalid pool type: %s' % pool_type
return x
class FastAdaptiveAvgPool(nn.Module):
    def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveAvgPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
return x.mean(self.dim, keepdim=not self.flatten)
class FastAdaptiveMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveMaxPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
return x.amax(self.dim, keepdim=not self.flatten)
class FastAdaptiveAvgMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveAvgMaxPool, self).__init__()
self.flatten = flatten
self.dim = get_spatial_dim(input_fmt)
def forward(self, x):
x_avg = x.mean(self.dim, keepdim=not self.flatten)
x_max = x.amax(self.dim, keepdim=not self.flatten)
return 0.5 * x_avg + 0.5 * x_max
class FastAdaptiveCatAvgMaxPool(nn.Module):
def __init__(self, flatten: bool = False, input_fmt: str = 'NCHW'):
super(FastAdaptiveCatAvgMaxPool, self).__init__()
self.flatten = flatten
self.dim_reduce = get_spatial_dim(input_fmt)
if flatten:
self.dim_cat = 1
else:
self.dim_cat = get_channel_dim(input_fmt)
def forward(self, x):
x_avg = x.mean(self.dim_reduce, keepdim=not self.flatten)
x_max = x.amax(self.dim_reduce, keepdim=not self.flatten)
return torch.cat((x_avg, x_max), self.dim_cat)
class AdaptiveAvgMaxPool2d(nn.Module):
def __init__(self, output_size: _int_tuple_2_t = 1):
super(AdaptiveAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_avgmax_pool2d(x, self.output_size)
class AdaptiveCatAvgMaxPool2d(nn.Module):
def __init__(self, output_size: _int_tuple_2_t = 1):
super(AdaptiveCatAvgMaxPool2d, self).__init__()
self.output_size = output_size
def forward(self, x):
return adaptive_catavgmax_pool2d(x, self.output_size)
class SelectAdaptivePool2d(nn.Module):
"""Selectable global pooling layer with dynamic input kernel size
"""
def __init__(
self,
output_size: _int_tuple_2_t = 1,
pool_type: str = 'fast',
flatten: bool = False,
input_fmt: str = 'NCHW',
):
super(SelectAdaptivePool2d, self).__init__()
assert input_fmt in ('NCHW', 'NHWC')
self.pool_type = pool_type or '' # convert other falsy values to empty string for consistent TS typing
pool_type = pool_type.lower()
if not pool_type:
self.pool = nn.Identity() # pass through
self.flatten = nn.Flatten(1) if flatten else nn.Identity()
elif pool_type.startswith('fast') or input_fmt != 'NCHW':
            assert output_size == 1, 'Fast pooling and non-NCHW input formats require output_size == 1.'
if pool_type.endswith('catavgmax'):
self.pool = FastAdaptiveCatAvgMaxPool(flatten, input_fmt=input_fmt)
elif pool_type.endswith('avgmax'):
self.pool = FastAdaptiveAvgMaxPool(flatten, input_fmt=input_fmt)
elif pool_type.endswith('max'):
self.pool = FastAdaptiveMaxPool(flatten, input_fmt=input_fmt)
elif pool_type == 'fast' or pool_type.endswith('avg'):
self.pool = FastAdaptiveAvgPool(flatten, input_fmt=input_fmt)
else:
assert False, 'Invalid pool type: %s' % pool_type
self.flatten = nn.Identity()
else:
assert input_fmt == 'NCHW'
if pool_type == 'avgmax':
self.pool = AdaptiveAvgMaxPool2d(output_size)
elif pool_type == 'catavgmax':
self.pool = AdaptiveCatAvgMaxPool2d(output_size)
elif pool_type == 'max':
self.pool = nn.AdaptiveMaxPool2d(output_size)
elif pool_type == 'avg':
self.pool = nn.AdaptiveAvgPool2d(output_size)
else:
assert False, 'Invalid pool type: %s' % pool_type
self.flatten = nn.Flatten(1) if flatten else nn.Identity()
def is_identity(self):
return not self.pool_type
def forward(self, x):
x = self.pool(x)
x = self.flatten(x)
return x
def feat_mult(self):
return adaptive_pool_feat_mult(self.pool_type)
def __repr__(self):
return self.__class__.__name__ + '(' \
+ 'pool_type=' + self.pool_type \
+ ', flatten=' + str(self.flatten) + ')'
| pytorch-image-models/timm/layers/adaptive_avgmax_pool.py/0 | {
"file_path": "pytorch-image-models/timm/layers/adaptive_avgmax_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 3039
} | 362 |
""" DropBlock, DropPath
PyTorch implementations of DropBlock and DropPath (Stochastic Depth) regularization layers.
Papers:
DropBlock: A regularization method for convolutional networks (https://arxiv.org/abs/1810.12890)
Deep Networks with Stochastic Depth (https://arxiv.org/abs/1603.09382)
Code:
DropBlock impl inspired by two Tensorflow impl that I liked:
- https://github.com/tensorflow/tpu/blob/master/models/official/resnet/resnet_model.py#L74
- https://github.com/clovaai/assembled-cnn/blob/master/nets/blocks.py
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from .grid import ndgrid
def drop_block_2d(
x,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False
):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
DropBlock with an experimental gaussian noise option. This layer has been tested on a few training
runs with success, but needs further validation and possibly optimization for lower runtime impact.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
# seed_drop_rate, the gamma parameter
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
# Forces the block to be inside the feature map.
w_i, h_i = ndgrid(torch.arange(W, device=x.device), torch.arange(H, device=x.device))
valid_block = ((w_i >= clipped_block_size // 2) & (w_i < W - (clipped_block_size - 1) // 2)) & \
((h_i >= clipped_block_size // 2) & (h_i < H - (clipped_block_size - 1) // 2))
valid_block = torch.reshape(valid_block, (1, 1, H, W)).to(dtype=x.dtype)
if batchwise:
# one mask for whole batch, quite a bit faster
uniform_noise = torch.rand((1, C, H, W), dtype=x.dtype, device=x.device)
else:
uniform_noise = torch.rand_like(x)
block_mask = ((2 - gamma - valid_block + uniform_noise) >= 1).to(dtype=x.dtype)
block_mask = -F.max_pool2d(
-block_mask,
kernel_size=clipped_block_size, # block_size,
stride=1,
padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.randn((1, C, H, W), dtype=x.dtype, device=x.device) if batchwise else torch.randn_like(x)
if inplace:
x.mul_(block_mask).add_(normal_noise * (1 - block_mask))
else:
x = x * block_mask + normal_noise * (1 - block_mask)
else:
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-7)).to(x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
def drop_block_fast_2d(
x: torch.Tensor,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
    DropBlock with an experimental gaussian noise option. Simplified from above without concern for valid
block mask at edges.
"""
B, C, H, W = x.shape
total_size = W * H
clipped_block_size = min(block_size, min(W, H))
gamma = gamma_scale * drop_prob * total_size / clipped_block_size ** 2 / (
(W - block_size + 1) * (H - block_size + 1))
block_mask = torch.empty_like(x).bernoulli_(gamma)
block_mask = F.max_pool2d(
block_mask.to(x.dtype), kernel_size=clipped_block_size, stride=1, padding=clipped_block_size // 2)
if with_noise:
normal_noise = torch.empty_like(x).normal_()
if inplace:
x.mul_(1. - block_mask).add_(normal_noise * block_mask)
else:
x = x * (1. - block_mask) + normal_noise * block_mask
else:
block_mask = 1 - block_mask
normalize_scale = (block_mask.numel() / block_mask.to(dtype=torch.float32).sum().add(1e-6)).to(dtype=x.dtype)
if inplace:
x.mul_(block_mask * normalize_scale)
else:
x = x * block_mask * normalize_scale
return x
class DropBlock2d(nn.Module):
""" DropBlock. See https://arxiv.org/pdf/1810.12890.pdf
"""
def __init__(
self,
drop_prob: float = 0.1,
block_size: int = 7,
gamma_scale: float = 1.0,
with_noise: bool = False,
inplace: bool = False,
batchwise: bool = False,
fast: bool = True):
super(DropBlock2d, self).__init__()
self.drop_prob = drop_prob
self.gamma_scale = gamma_scale
self.block_size = block_size
self.with_noise = with_noise
self.inplace = inplace
self.batchwise = batchwise
self.fast = fast # FIXME finish comparisons of fast vs not
def forward(self, x):
if not self.training or not self.drop_prob:
return x
if self.fast:
return drop_block_fast_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace)
else:
return drop_block_2d(
x, self.drop_prob, self.block_size, self.gamma_scale, self.with_noise, self.inplace, self.batchwise)
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f'drop_prob={round(self.drop_prob,3):0.3f}'
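# Illustrative usage (not part of the original module):
#   dp = DropPath(drop_prob=0.1)
#   dp.train()
#   y = dp(torch.randn(8, 197, 768))  # each sample is dropped with p=0.1; kept samples are scaled by 1 / 0.9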
| pytorch-image-models/timm/layers/drop.py/0 | {
"file_path": "pytorch-image-models/timm/layers/drop.py",
"repo_id": "pytorch-image-models",
"token_count": 3016
} | 363 |
""" Median Pool
Hacked together by / Copyright 2020 Ross Wightman
"""
import torch.nn as nn
import torch.nn.functional as F
from .helpers import to_2tuple, to_4tuple
class MedianPool2d(nn.Module):
""" Median pool (usable as median filter when stride=1) module.
Args:
kernel_size: size of pooling kernel, int or 2-tuple
stride: pool stride, int or 2-tuple
padding: pool padding, int or 4-tuple (l, r, t, b) as in pytorch F.pad
same: override padding and enforce same padding, boolean
"""
def __init__(self, kernel_size=3, stride=1, padding=0, same=False):
super(MedianPool2d, self).__init__()
self.k = to_2tuple(kernel_size)
self.stride = to_2tuple(stride)
self.padding = to_4tuple(padding) # convert to l, r, t, b
self.same = same
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else:
padding = self.padding
return padding
def forward(self, x):
x = F.pad(x, self._padding(x), mode='reflect')
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,)).median(dim=-1)[0]
return x
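# Illustrative usage (not part of the original module):
#   mp = MedianPool2d(kernel_size=3, stride=1, same=True)
#   y = mp(torch.randn(1, 3, 32, 32))  # acts as a 3x3 median filter, output keeps the 32x32 spatial size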
| pytorch-image-models/timm/layers/median_pool.py/0 | {
"file_path": "pytorch-image-models/timm/layers/median_pool.py",
"repo_id": "pytorch-image-models",
"token_count": 883
} | 364 |
import torch
import torch.nn as nn
class SpaceToDepth(nn.Module):
bs: torch.jit.Final[int]
def __init__(self, block_size=4):
super().__init__()
assert block_size == 4
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs)
x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs)
x = x.view(N, C * self.bs * self.bs, H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs)
return x
class DepthToSpace(nn.Module):
def __init__(self, block_size):
super().__init__()
self.bs = block_size
def forward(self, x):
N, C, H, W = x.size()
x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W)
x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs)
x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs)
return x
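# Illustrative round trip (not part of the original module): SpaceToDepth followed by DepthToSpace
# with the same block size reconstructs the input exactly.
#   x = torch.randn(1, 3, 64, 64)
#   assert torch.equal(DepthToSpace(4)(SpaceToDepth(4)(x)), x)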
| pytorch-image-models/timm/layers/space_to_depth.py/0 | {
"file_path": "pytorch-image-models/timm/layers/space_to_depth.py",
"repo_id": "pytorch-image-models",
"token_count": 568
} | 365 |
""" EfficientNet, MobileNetV3, etc Blocks
Hacked together by / Copyright 2019, Ross Wightman
"""
import torch
import torch.nn as nn
from torch.nn import functional as F
from timm.layers import create_conv2d, DropPath, make_divisible, create_act_layer, get_norm_act_layer
__all__ = [
'SqueezeExcite', 'ConvBnAct', 'DepthwiseSeparableConv', 'InvertedResidual', 'CondConvResidual', 'EdgeResidual']
def num_groups(group_size, channels):
if not group_size: # 0 or None
return 1 # normal conv with 1 group
else:
# NOTE group_size == 1 -> depthwise conv
assert channels % group_size == 0
return channels // group_size
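# e.g. num_groups(0, 64) -> 1 (regular conv), num_groups(1, 64) -> 64 (depthwise conv),
#      num_groups(8, 64) -> 8 groups of 8 channels each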
class SqueezeExcite(nn.Module):
""" Squeeze-and-Excitation w/ specific features for EfficientNet/MobileNet family
Args:
in_chs (int): input channels to layer
rd_ratio (float): ratio of squeeze reduction
act_layer (nn.Module): activation layer of containing block
gate_layer (Callable): attention gate function
force_act_layer (nn.Module): override block's activation fn if this is set/bound
rd_round_fn (Callable): specify a fn to calculate rounding of reduced chs
"""
def __init__(
self, in_chs, rd_ratio=0.25, rd_channels=None, act_layer=nn.ReLU,
gate_layer=nn.Sigmoid, force_act_layer=None, rd_round_fn=None):
super(SqueezeExcite, self).__init__()
if rd_channels is None:
rd_round_fn = rd_round_fn or round
rd_channels = rd_round_fn(in_chs * rd_ratio)
act_layer = force_act_layer or act_layer
self.conv_reduce = nn.Conv2d(in_chs, rd_channels, 1, bias=True)
self.act1 = create_act_layer(act_layer, inplace=True)
self.conv_expand = nn.Conv2d(rd_channels, in_chs, 1, bias=True)
self.gate = create_act_layer(gate_layer)
def forward(self, x):
x_se = x.mean((2, 3), keepdim=True)
x_se = self.conv_reduce(x_se)
x_se = self.act1(x_se)
x_se = self.conv_expand(x_se)
return x * self.gate(x_se)
class ConvBnAct(nn.Module):
""" Conv + Norm Layer + Activation w/ optional skip connection
"""
def __init__(
self, in_chs, out_chs, kernel_size, stride=1, dilation=1, group_size=0, pad_type='',
skip=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d, drop_path_rate=0.):
super(ConvBnAct, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
groups = num_groups(group_size, in_chs)
self.has_skip = skip and stride == 1 and in_chs == out_chs
self.conv = create_conv2d(
in_chs, out_chs, kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(out_chs, inplace=True)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
        if location == 'expansion':  # output of conv after act, same as block output
return dict(module='bn1', hook_type='forward', num_chs=self.conv.out_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv.out_channels)
def forward(self, x):
shortcut = x
x = self.conv(x)
x = self.bn1(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class DepthwiseSeparableConv(nn.Module):
""" DepthwiseSeparable block
Used for DS convs in MobileNet-V1 and in the place of IR blocks that have no expansion
    (factor of 1.0). This is an alternative to having an IR with an optional first pw conv.
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, pw_kernel_size=1, pw_act=False, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d,
se_layer=None, drop_path_rate=0.):
super(DepthwiseSeparableConv, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
groups = num_groups(group_size, in_chs)
self.has_skip = (stride == 1 and in_chs == out_chs) and not noskip
self.has_pw_act = pw_act # activation after point-wise conv
self.conv_dw = create_conv2d(
in_chs, in_chs, dw_kernel_size, stride=stride, dilation=dilation, padding=pad_type, groups=groups)
self.bn1 = norm_act_layer(in_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(in_chs, act_layer=act_layer) if se_layer else nn.Identity()
self.conv_pw = create_conv2d(in_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, inplace=True, apply_act=self.has_pw_act)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PW
return dict(module='conv_pw', hook_type='forward_pre', num_chs=self.conv_pw.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pw.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.bn1(x)
x = self.se(x)
x = self.conv_pw(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class InvertedResidual(nn.Module):
""" Inverted residual block w/ optional SE
Originally used in MobileNet-V2 - https://arxiv.org/abs/1801.04381v4, this layer is often
referred to as 'MBConv' for (Mobile inverted bottleneck conv) and is also used in
* MNasNet - https://arxiv.org/abs/1807.11626
* EfficientNet - https://arxiv.org/abs/1905.11946
* MobileNet-V3 - https://arxiv.org/abs/1905.02244
"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, conv_kwargs=None, drop_path_rate=0.):
super(InvertedResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
conv_kwargs = conv_kwargs or {}
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, mid_chs)
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
# Point-wise expansion
self.conv_pw = create_conv2d(in_chs, mid_chs, exp_kernel_size, padding=pad_type, **conv_kwargs)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
# Depth-wise convolution
self.conv_dw = create_conv2d(
mid_chs, mid_chs, dw_kernel_size, stride=stride, dilation=dilation,
groups=groups, padding=pad_type, **conv_kwargs)
self.bn2 = norm_act_layer(mid_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type, **conv_kwargs)
self.bn3 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, input to PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_pw(x)
x = self.bn1(x)
x = self.conv_dw(x)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class CondConvResidual(InvertedResidual):
""" Inverted residual block w/ CondConv routing"""
def __init__(
self, in_chs, out_chs, dw_kernel_size=3, stride=1, dilation=1, group_size=1, pad_type='',
noskip=False, exp_ratio=1.0, exp_kernel_size=1, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, num_experts=0, drop_path_rate=0.):
self.num_experts = num_experts
conv_kwargs = dict(num_experts=self.num_experts)
super(CondConvResidual, self).__init__(
in_chs, out_chs, dw_kernel_size=dw_kernel_size, stride=stride, dilation=dilation, group_size=group_size,
pad_type=pad_type, act_layer=act_layer, noskip=noskip, exp_ratio=exp_ratio, exp_kernel_size=exp_kernel_size,
pw_kernel_size=pw_kernel_size, se_layer=se_layer, norm_layer=norm_layer, conv_kwargs=conv_kwargs,
drop_path_rate=drop_path_rate)
self.routing_fn = nn.Linear(in_chs, self.num_experts)
def forward(self, x):
shortcut = x
pooled_inputs = F.adaptive_avg_pool2d(x, 1).flatten(1) # CondConv routing
routing_weights = torch.sigmoid(self.routing_fn(pooled_inputs))
x = self.conv_pw(x, routing_weights)
x = self.bn1(x)
x = self.conv_dw(x, routing_weights)
x = self.bn2(x)
x = self.se(x)
x = self.conv_pwl(x, routing_weights)
x = self.bn3(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
class EdgeResidual(nn.Module):
""" Residual block with expansion convolution followed by pointwise-linear w/ stride
Originally introduced in `EfficientNet-EdgeTPU: Creating Accelerator-Optimized Neural Networks with AutoML`
- https://ai.googleblog.com/2019/08/efficientnet-edgetpu-creating.html
This layer is also called FusedMBConv in the MobileDet, EfficientNet-X, and EfficientNet-V2 papers
* MobileDet - https://arxiv.org/abs/2004.14525
* EfficientNet-X - https://arxiv.org/abs/2102.05610
* EfficientNet-V2 - https://arxiv.org/abs/2104.00298
"""
def __init__(
self, in_chs, out_chs, exp_kernel_size=3, stride=1, dilation=1, group_size=0, pad_type='',
force_in_chs=0, noskip=False, exp_ratio=1.0, pw_kernel_size=1, act_layer=nn.ReLU,
norm_layer=nn.BatchNorm2d, se_layer=None, drop_path_rate=0.):
super(EdgeResidual, self).__init__()
norm_act_layer = get_norm_act_layer(norm_layer, act_layer)
if force_in_chs > 0:
mid_chs = make_divisible(force_in_chs * exp_ratio)
else:
mid_chs = make_divisible(in_chs * exp_ratio)
groups = num_groups(group_size, in_chs)
self.has_skip = (in_chs == out_chs and stride == 1) and not noskip
# Expansion convolution
self.conv_exp = create_conv2d(
in_chs, mid_chs, exp_kernel_size, stride=stride, dilation=dilation, groups=groups, padding=pad_type)
self.bn1 = norm_act_layer(mid_chs, inplace=True)
# Squeeze-and-excitation
self.se = se_layer(mid_chs, act_layer=act_layer) if se_layer else nn.Identity()
# Point-wise linear projection
self.conv_pwl = create_conv2d(mid_chs, out_chs, pw_kernel_size, padding=pad_type)
self.bn2 = norm_act_layer(out_chs, apply_act=False)
self.drop_path = DropPath(drop_path_rate) if drop_path_rate else nn.Identity()
def feature_info(self, location):
if location == 'expansion': # after SE, before PWL
return dict(module='conv_pwl', hook_type='forward_pre', num_chs=self.conv_pwl.in_channels)
else: # location == 'bottleneck', block output
return dict(module='', num_chs=self.conv_pwl.out_channels)
def forward(self, x):
shortcut = x
x = self.conv_exp(x)
x = self.bn1(x)
x = self.se(x)
x = self.conv_pwl(x)
x = self.bn2(x)
if self.has_skip:
x = self.drop_path(x) + shortcut
return x
| pytorch-image-models/timm/models/_efficientnet_blocks.py/0 | {
"file_path": "pytorch-image-models/timm/models/_efficientnet_blocks.py",
"repo_id": "pytorch-image-models",
"token_count": 5589
} | 366 |
""" BEiT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
Model from official source: https://github.com/microsoft/unilm/tree/master/beit
@inproceedings{beit,
title={{BEiT}: {BERT} Pre-Training of Image Transformers},
author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
booktitle={International Conference on Learning Representations},
year={2022},
url={https://openreview.net/forum?id=p-BhZSz59o4}
}
BEiT-v2 from https://github.com/microsoft/unilm/tree/master/beit2
@article{beitv2,
title={{BEiT v2}: Masked Image Modeling with Vector-Quantized Visual Tokenizers},
author={Zhiliang Peng and Li Dong and Hangbo Bao and Qixiang Ye and Furu Wei},
year={2022},
eprint={2208.06366},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
At this point only the 1k fine-tuned classification weights and model configs have been added,
see original source above for pre-training models and procedure.
Modifications by / Copyright 2021 Ross Wightman, original copyrights below
"""
# --------------------------------------------------------
# BEIT: BERT Pre-Training of Image Transformers (https://arxiv.org/abs/2106.08254)
# Github source: https://github.com/microsoft/unilm/tree/master/beit
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# By Hangbo Bao
# Based on timm and DeiT code bases
# https://github.com/rwightman/pytorch-image-models/tree/master/timm
# https://github.com/facebookresearch/deit/
# https://github.com/facebookresearch/dino
# --------------------------------------------------------'
import math
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import PatchEmbed, Mlp, SwiGLU, LayerNorm, DropPath, trunc_normal_, use_fused_attn
from timm.layers import resample_patch_embed, resample_abs_pos_embed, resize_rel_pos_bias_table, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._registry import generate_default_cfgs, register_model
__all__ = ['Beit']
def gen_relative_position_index(window_size: Tuple[int, int]) -> torch.Tensor:
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
# cls to token & token 2 cls & cls to cls
# get pair-wise relative position index for each token inside the window
window_area = window_size[0] * window_size[1]
coords = torch.stack(ndgrid(torch.arange(window_size[0]), torch.arange(window_size[1]))) # 2, Wh, Ww
coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww
relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww
relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2
relative_coords[:, :, 0] += window_size[0] - 1 # shift to start from 0
relative_coords[:, :, 1] += window_size[1] - 1
relative_coords[:, :, 0] *= 2 * window_size[1] - 1
relative_position_index = torch.zeros(size=(window_area + 1,) * 2, dtype=relative_coords.dtype)
relative_position_index[1:, 1:] = relative_coords.sum(-1) # Wh*Ww, Wh*Ww
relative_position_index[0, 0:] = num_relative_distance - 3
relative_position_index[0:, 0] = num_relative_distance - 2
relative_position_index[0, 0] = num_relative_distance - 1
return relative_position_index
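# e.g. for a 14x14 patch grid this returns a (197, 197) index tensor (196 patch tokens + 1 cls token),
# with values indexing a relative position bias table of size (2*14-1) * (2*14-1) + 3 = 732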
class Attention(nn.Module):
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
attn_drop: float = 0.,
proj_drop: float = 0.,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
if attn_head_dim is not None:
head_dim = attn_head_dim
all_head_dim = head_dim * self.num_heads
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, all_head_dim * 3, bias=False)
if qkv_bias:
self.q_bias = nn.Parameter(torch.zeros(all_head_dim))
self.register_buffer('k_bias', torch.zeros(all_head_dim), persistent=False)
self.v_bias = nn.Parameter(torch.zeros(all_head_dim))
else:
self.q_bias = None
self.k_bias = None
self.v_bias = None
if window_size:
self.window_size = window_size
self.num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(
torch.zeros(self.num_relative_distance, num_heads)) # 2*Wh-1 * 2*Ww-1, nH
self.register_buffer("relative_position_index", gen_relative_position_index(window_size), persistent=False)
else:
self.window_size = None
self.relative_position_bias_table = None
self.relative_position_index = None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(all_head_dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def _get_rel_pos_bias(self):
relative_position_bias = self.relative_position_bias_table[
self.relative_position_index.view(-1)].view(
self.window_size[0] * self.window_size[1] + 1,
self.window_size[0] * self.window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
return relative_position_bias.unsqueeze(0)
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
B, N, C = x.shape
qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0) # B, num_heads, N, head_dim
if self.fused_attn:
rel_pos_bias = None
if self.relative_position_bias_table is not None:
rel_pos_bias = self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
rel_pos_bias = rel_pos_bias + shared_rel_pos_bias
elif shared_rel_pos_bias is not None:
rel_pos_bias = shared_rel_pos_bias
x = F.scaled_dot_product_attention(
q, k, v,
attn_mask=rel_pos_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = (q @ k.transpose(-2, -1))
if self.relative_position_bias_table is not None:
attn = attn + self._get_rel_pos_bias()
if shared_rel_pos_bias is not None:
attn = attn + shared_rel_pos_bias
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class Block(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
qkv_bias: bool = False,
mlp_ratio: float = 4.,
scale_mlp: bool = False,
swiglu_mlp: bool = False,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
init_values: Optional[float] = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
window_size: Optional[Tuple[int, int]] = None,
attn_head_dim: Optional[int] = None,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(
dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
window_size=window_size,
attn_head_dim=attn_head_dim,
)
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
if swiglu_mlp:
self.mlp = SwiGLU(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
else:
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer if scale_mlp else None,
drop=proj_drop,
)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
if init_values:
self.gamma_1 = nn.Parameter(init_values * torch.ones(dim))
self.gamma_2 = nn.Parameter(init_values * torch.ones(dim))
else:
self.gamma_1, self.gamma_2 = None, None
def forward(self, x, shared_rel_pos_bias: Optional[torch.Tensor] = None):
if self.gamma_1 is None:
x = x + self.drop_path1(self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.mlp(self.norm2(x)))
else:
x = x + self.drop_path1(self.gamma_1 * self.attn(self.norm1(x), shared_rel_pos_bias=shared_rel_pos_bias))
x = x + self.drop_path2(self.gamma_2 * self.mlp(self.norm2(x)))
return x
class RelativePositionBias(nn.Module):
def __init__(self, window_size, num_heads):
super().__init__()
self.window_size = window_size
self.window_area = window_size[0] * window_size[1]
num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3
self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads))
# trunc_normal_(self.relative_position_bias_table, std=.02)
self.register_buffer("relative_position_index", gen_relative_position_index(window_size))
def forward(self):
relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(
self.window_area + 1, self.window_area + 1, -1) # Wh*Ww,Wh*Ww,nH
return relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
class Beit(nn.Module):
""" Vision Transformer with support for patch or hybrid CNN input stage
"""
def __init__(
self,
img_size: Union[int, Tuple[int, int]] = 224,
patch_size: Union[int, Tuple[int, int]] = 16,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
embed_dim: int = 768,
depth: int = 12,
num_heads: int = 12,
qkv_bias: bool = True,
mlp_ratio: float = 4.,
swiglu_mlp: bool = False,
scale_mlp: bool = False,
drop_rate: float = 0.,
pos_drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
norm_layer: Callable = LayerNorm,
init_values: Optional[float] = None,
use_abs_pos_emb: bool = True,
use_rel_pos_bias: bool = False,
use_shared_rel_pos_bias: bool = False,
head_init_scale: float = 0.001,
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1
self.grad_checkpointing = False
self.patch_embed = PatchEmbed(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
num_patches = self.patch_embed.num_patches
r = self.patch_embed.feat_ratio() if hasattr(self.patch_embed, 'feat_ratio') else patch_size
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
# self.mask_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
self.pos_embed = nn.Parameter(torch.zeros(1, num_patches + 1, embed_dim)) if use_abs_pos_emb else None
self.pos_drop = nn.Dropout(p=pos_drop_rate)
if use_shared_rel_pos_bias:
self.rel_pos_bias = RelativePositionBias(
window_size=self.patch_embed.grid_size,
num_heads=num_heads,
)
else:
self.rel_pos_bias = None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
Block(
dim=embed_dim,
num_heads=num_heads,
qkv_bias=qkv_bias,
mlp_ratio=mlp_ratio,
scale_mlp=scale_mlp,
swiglu_mlp=swiglu_mlp,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
init_values=init_values,
window_size=self.patch_embed.grid_size if use_rel_pos_bias else None,
)
for i in range(depth)])
self.feature_info = [
dict(module=f'blocks.{i}', num_chs=embed_dim, reduction=r) for i in range(depth)]
use_fc_norm = self.global_pool == 'avg'
self.norm = nn.Identity() if use_fc_norm else norm_layer(embed_dim)
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
if self.pos_embed is not None:
trunc_normal_(self.pos_embed, std=.02)
trunc_normal_(self.cls_token, std=.02)
self.fix_init_weight()
if isinstance(self.head, nn.Linear):
trunc_normal_(self.head.weight, std=.02)
self.head.weight.data.mul_(head_init_scale)
self.head.bias.data.mul_(head_init_scale)
def fix_init_weight(self):
def rescale(param, layer_id):
param.div_(math.sqrt(2.0 * layer_id))
for layer_id, layer in enumerate(self.blocks):
rescale(layer.attn.proj.weight.data, layer_id + 1)
rescale(layer.mlp.fc2.weight.data, layer_id + 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
@torch.jit.ignore
def no_weight_decay(self):
nwd = {'pos_embed', 'cls_token'}
for n, _ in self.named_parameters():
if 'relative_position_bias_table' in n:
nwd.add(n)
return nwd
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^cls_token|pos_embed|patch_embed|rel_pos_bias', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))],
)
return matcher
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int], Tuple[int]]] = None,
return_prefix_tokens: bool = False,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
            indices: Take last n blocks if an int; if a sequence, select by matching indices
return_prefix_tokens: Return both prefix and spatial intermediate tokens
norm: Apply norm layer to all intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features if `intermediates_only` is True, otherwise a tuple of
            (final features, list of intermediate features).
        """
assert output_fmt in ('NCHW', 'NLC'), 'Output format must be one of NCHW or NLC.'
reshape = output_fmt == 'NCHW'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
# forward pass
B, _, height, width = x.shape
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
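        # when a shared relative position bias module is enabled, it is evaluated once here
        # and the same bias tensor is passed to every block below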
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
blocks = self.blocks
else:
blocks = self.blocks[:max_index + 1]
for i, blk in enumerate(blocks):
x = blk(x, shared_rel_pos_bias=rel_pos_bias)
if i in take_indices:
# normalize intermediates with final norm layer if enabled
intermediates.append(self.norm(x) if norm else x)
# process intermediates
if self.num_prefix_tokens:
# split prefix (e.g. class, distill) and spatial feature tokens
prefix_tokens = [y[:, 0:self.num_prefix_tokens] for y in intermediates]
intermediates = [y[:, self.num_prefix_tokens:] for y in intermediates]
if reshape:
# reshape to BCHW output format
H, W = self.patch_embed.dynamic_feat_size((height, width))
intermediates = [y.reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous() for y in intermediates]
if not torch.jit.is_scripting() and return_prefix_tokens:
            # return_prefix not supported in torchscript due to poor type handling
intermediates = list(zip(intermediates, prefix_tokens))
if intermediates_only:
return intermediates
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int], Tuple[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.blocks), indices)
self.blocks = self.blocks[:max_index + 1] # truncate blocks
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.fc_norm = nn.Identity()
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.patch_embed(x)
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
if self.pos_embed is not None:
x = x + self.pos_embed
x = self.pos_drop(x)
rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
else:
x = blk(x, shared_rel_pos_bias=rel_pos_bias)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5),
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'beit_base_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_base_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_base_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beit_large_patch16_224.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22kto1k.pth',
hf_hub_id='timm/'),
'beit_large_patch16_384.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_384_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 384, 384), crop_pct=1.0,
),
'beit_large_patch16_512.in22k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_512_pt22k_ft22kto1k.pth',
hf_hub_id='timm/',
input_size=(3, 512, 512), crop_pct=1.0,
),
'beit_large_patch16_224.in22k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth',
hf_hub_id='timm/',
num_classes=21841,
),
'beitv2_base_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_base_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_base_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21kto1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in1k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft1k.pth',
hf_hub_id='timm/',
crop_pct=0.95, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
'beitv2_large_patch16_224.in1k_ft_in22k': _cfg(
#url='https://conversationhub.blob.core.windows.net/beit-share-public/beitv2/beitv2_large_patch16_224_pt1k_ft21k.pth',
hf_hub_id='timm/',
num_classes=21841, mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD
),
})
def _beit_checkpoint_filter_fn(state_dict, model, interpolation='bicubic', antialias=True):
state_dict = state_dict.get('model', state_dict)
state_dict = state_dict.get('module', state_dict)
# beit v2 didn't strip module
out_dict = {}
for k, v in state_dict.items():
if 'relative_position_index' in k:
continue
if 'patch_embed.proj.weight' in k:
O, I, H, W = model.patch_embed.proj.weight.shape
if v.shape[-1] != W or v.shape[-2] != H:
v = resample_patch_embed(
v,
(H, W),
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
num_prefix_tokens = 1
v = resample_abs_pos_embed(
v,
new_size=model.patch_embed.grid_size,
num_prefix_tokens=num_prefix_tokens,
interpolation=interpolation,
antialias=antialias,
verbose=True,
)
elif k.endswith('relative_position_bias_table'):
m = model.get_submodule(k[:-29])
if v.shape != m.relative_position_bias_table.shape or m.window_size[0] != m.window_size[1]:
v = resize_rel_pos_bias_table(
v,
new_window_size=m.window_size,
new_bias_shape=m.relative_position_bias_table.shape,
)
out_dict[k] = v
return out_dict
def _create_beit(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 3)
model = build_model_with_cfg(
Beit, variant, pretrained,
pretrained_filter_fn=_beit_checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
@register_model
def beit_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_base_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=768, depth=12, num_heads=12,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=0.1)
model = _create_beit('beit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_384(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=384, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beit_large_patch16_512(pretrained=False, **kwargs) -> Beit:
model_args = dict(
img_size=512, patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beit_large_patch16_512', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_base_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, mlp_ratio=4,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def beitv2_large_patch16_224(pretrained=False, **kwargs) -> Beit:
model_args = dict(
patch_size=16, embed_dim=1024, depth=24, num_heads=16,
use_abs_pos_emb=False, use_rel_pos_bias=True, init_values=1e-5)
model = _create_beit('beitv2_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
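# Usage sketch (assumes the timm package is installed and the entrypoints above are registered):
#   import timm
#   model = timm.create_model('beit_base_patch16_224', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # shape (1, 1000)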
| pytorch-image-models/timm/models/beit.py/0 | {
"file_path": "pytorch-image-models/timm/models/beit.py",
"repo_id": "pytorch-image-models",
"token_count": 14186
} | 367 |
""" EfficientFormer
@article{li2022efficientformer,
title={EfficientFormer: Vision Transformers at MobileNet Speed},
author={Li, Yanyu and Yuan, Geng and Wen, Yang and Hu, Eric and Evangelidis, Georgios and Tulyakov,
Sergey and Wang, Yanzhi and Ren, Jian},
journal={arXiv preprint arXiv:2206.01191},
year={2022}
}
Based on Apache 2.0 licensed code at https://github.com/snap-research/EfficientFormer, Copyright (c) 2022 Snap Inc.
Modifications and timm support by / Copyright 2022, Ross Wightman
"""
from typing import Dict, List, Optional, Tuple, Union
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, to_2tuple, Mlp, ndgrid
from ._builder import build_model_with_cfg
from ._features import feature_take_indices
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['EfficientFormer'] # model_registry will add each entrypoint fn to this
EfficientFormer_width = {
'l1': (48, 96, 224, 448),
'l3': (64, 128, 320, 512),
'l7': (96, 192, 384, 768),
}
EfficientFormer_depth = {
'l1': (3, 2, 6, 4),
'l3': (4, 4, 12, 6),
'l7': (6, 6, 18, 8),
}
class Attention(torch.nn.Module):
attention_bias_cache: Dict[str, torch.Tensor]
def __init__(
self,
dim=384,
key_dim=32,
num_heads=8,
attn_ratio=4,
resolution=7
):
super().__init__()
self.num_heads = num_heads
self.scale = key_dim ** -0.5
self.key_dim = key_dim
self.key_attn_dim = key_dim * num_heads
self.val_dim = int(attn_ratio * key_dim)
self.val_attn_dim = self.val_dim * num_heads
self.attn_ratio = attn_ratio
self.qkv = nn.Linear(dim, self.key_attn_dim * 2 + self.val_attn_dim)
self.proj = nn.Linear(self.val_attn_dim, dim)
resolution = to_2tuple(resolution)
pos = torch.stack(ndgrid(torch.arange(resolution[0]), torch.arange(resolution[1]))).flatten(1)
rel_pos = (pos[..., :, None] - pos[..., None, :]).abs()
rel_pos = (rel_pos[0] * resolution[1]) + rel_pos[1]
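        # rel_pos maps every (query, key) position pair to a flat index into the per-head bias
        # table, based on the absolute row/col offset between the two positions; it is registered
        # below as the attention_bias_idxs buffer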
self.attention_biases = torch.nn.Parameter(torch.zeros(num_heads, resolution[0] * resolution[1]))
self.register_buffer('attention_bias_idxs', rel_pos)
self.attention_bias_cache = {} # per-device attention_biases cache (data-parallel compat)
@torch.no_grad()
def train(self, mode=True):
super().train(mode)
if mode and self.attention_bias_cache:
self.attention_bias_cache = {} # clear ab cache
def get_attention_biases(self, device: torch.device) -> torch.Tensor:
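        # index the bias table on the fly while training/tracing; otherwise cache the gathered
        # (num_heads, N, N) tensor per device so the indexing isn't repeated every forward pass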
if torch.jit.is_tracing() or self.training:
return self.attention_biases[:, self.attention_bias_idxs]
else:
device_key = str(device)
if device_key not in self.attention_bias_cache:
self.attention_bias_cache[device_key] = self.attention_biases[:, self.attention_bias_idxs]
return self.attention_bias_cache[device_key]
def forward(self, x): # x (B,N,C)
B, N, C = x.shape
qkv = self.qkv(x)
qkv = qkv.reshape(B, N, self.num_heads, -1).permute(0, 2, 1, 3)
q, k, v = qkv.split([self.key_dim, self.key_dim, self.val_dim], dim=3)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn + self.get_attention_biases(x.device)
attn = attn.softmax(dim=-1)
x = (attn @ v).transpose(1, 2).reshape(B, N, self.val_attn_dim)
x = self.proj(x)
return x
class Stem4(nn.Sequential):
def __init__(self, in_chs, out_chs, act_layer=nn.ReLU, norm_layer=nn.BatchNorm2d):
super().__init__()
self.stride = 4
self.add_module('conv1', nn.Conv2d(in_chs, out_chs // 2, kernel_size=3, stride=2, padding=1))
self.add_module('norm1', norm_layer(out_chs // 2))
self.add_module('act1', act_layer())
self.add_module('conv2', nn.Conv2d(out_chs // 2, out_chs, kernel_size=3, stride=2, padding=1))
self.add_module('norm2', norm_layer(out_chs))
self.add_module('act2', act_layer())
class Downsample(nn.Module):
"""
Downsampling via strided conv w/ norm
Input: tensor in shape [B, C, H, W]
Output: tensor in shape [B, C, H/stride, W/stride]
"""
def __init__(self, in_chs, out_chs, kernel_size=3, stride=2, padding=None, norm_layer=nn.BatchNorm2d):
super().__init__()
if padding is None:
padding = kernel_size // 2
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=kernel_size, stride=stride, padding=padding)
self.norm = norm_layer(out_chs)
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
return x
class Flat(nn.Module):
def __init__(self, ):
super().__init__()
def forward(self, x):
x = x.flatten(2).transpose(1, 2)
return x
class Pooling(nn.Module):
"""
Implementation of pooling for PoolFormer
--pool_size: pooling size
"""
def __init__(self, pool_size=3):
super().__init__()
self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)
def forward(self, x):
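        # subtracting the input makes this a residual-style token mixer: once the surrounding
        # block adds x back, the net effect is plain average pooling (PoolFormer-style mixing)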
return self.pool(x) - x
class ConvMlpWithNorm(nn.Module):
"""
Implementation of MLP with 1*1 convolutions.
Input: tensor with shape [B, C, H, W]
"""
def __init__(
self,
in_features,
hidden_features=None,
out_features=None,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
drop=0.
):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.fc1 = nn.Conv2d(in_features, hidden_features, 1)
self.norm1 = norm_layer(hidden_features) if norm_layer is not None else nn.Identity()
self.act = act_layer()
self.fc2 = nn.Conv2d(hidden_features, out_features, 1)
self.norm2 = norm_layer(out_features) if norm_layer is not None else nn.Identity()
self.drop = nn.Dropout(drop)
def forward(self, x):
x = self.fc1(x)
x = self.norm1(x)
x = self.act(x)
x = self.drop(x)
x = self.fc2(x)
x = self.norm2(x)
x = self.drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class MetaBlock1d(nn.Module):
def __init__(
self,
dim,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.norm1 = norm_layer(dim)
self.token_mixer = Attention(dim)
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.ls1 = LayerScale(dim, layer_scale_init_value)
self.ls2 = LayerScale(dim, layer_scale_init_value)
def forward(self, x):
x = x + self.drop_path(self.ls1(self.token_mixer(self.norm1(x))))
x = x + self.drop_path(self.ls2(self.mlp(self.norm2(x))))
return x
class LayerScale2d(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
gamma = self.gamma.view(1, -1, 1, 1)
return x.mul_(gamma) if self.inplace else x * gamma
class MetaBlock2d(nn.Module):
def __init__(
self,
dim,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
proj_drop=0.,
drop_path=0.,
layer_scale_init_value=1e-5
):
super().__init__()
self.token_mixer = Pooling(pool_size=pool_size)
self.ls1 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = ConvMlpWithNorm(
dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
norm_layer=norm_layer,
drop=proj_drop,
)
self.ls2 = LayerScale2d(dim, layer_scale_init_value)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.ls1(self.token_mixer(x)))
x = x + self.drop_path2(self.ls2(self.mlp(x)))
return x
class EfficientFormerStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
downsample=True,
num_vit=1,
pool_size=3,
mlp_ratio=4.,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
proj_drop=.0,
drop_path=0.,
layer_scale_init_value=1e-5,
):
super().__init__()
self.grad_checkpointing = False
if downsample:
self.downsample = Downsample(in_chs=dim, out_chs=dim_out, norm_layer=norm_layer)
dim = dim_out
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
if num_vit and num_vit >= depth:
blocks.append(Flat())
for block_idx in range(depth):
remain_idx = depth - block_idx - 1
if num_vit and num_vit > remain_idx:
blocks.append(
MetaBlock1d(
dim,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer_cl,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
else:
blocks.append(
MetaBlock2d(
dim,
pool_size=pool_size,
mlp_ratio=mlp_ratio,
act_layer=act_layer,
norm_layer=norm_layer,
proj_drop=proj_drop,
drop_path=drop_path[block_idx],
layer_scale_init_value=layer_scale_init_value,
))
if num_vit and num_vit == remain_idx:
blocks.append(Flat())
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class EfficientFormer(nn.Module):
def __init__(
self,
depths,
embed_dims=None,
in_chans=3,
num_classes=1000,
global_pool='avg',
downsamples=None,
num_vit=0,
mlp_ratios=4,
pool_size=3,
layer_scale_init_value=1e-5,
act_layer=nn.GELU,
norm_layer=nn.BatchNorm2d,
norm_layer_cl=nn.LayerNorm,
drop_rate=0.,
proj_drop_rate=0.,
drop_path_rate=0.,
**kwargs
):
super().__init__()
self.num_classes = num_classes
self.global_pool = global_pool
self.stem = Stem4(in_chans, embed_dims[0], norm_layer=norm_layer)
prev_dim = embed_dims[0]
# stochastic depth decay rule
self.num_stages = len(depths)
last_stage = self.num_stages - 1
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
downsamples = downsamples or (False,) + (True,) * (self.num_stages - 1)
stages = []
self.feature_info = []
for i in range(self.num_stages):
stage = EfficientFormerStage(
prev_dim,
embed_dims[i],
depths[i],
downsample=downsamples[i],
num_vit=num_vit if i == last_stage else 0,
pool_size=pool_size,
mlp_ratio=mlp_ratios,
act_layer=act_layer,
norm_layer_cl=norm_layer_cl,
norm_layer=norm_layer,
proj_drop=proj_drop_rate,
drop_path=dpr[i],
layer_scale_init_value=layer_scale_init_value,
)
prev_dim = embed_dims[i]
stages.append(stage)
self.feature_info += [dict(num_chs=embed_dims[i], reduction=2**(1+i), module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
# Classifier head
self.num_features = embed_dims[-1]
self.norm = norm_layer_cl(self.num_features)
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
# assuming model is always distilled (valid for current checkpoints, will split def if that changes)
self.head_dist = nn.Linear(embed_dims[-1], num_classes) if num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.apply(self._init_weights)
# init for classification
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def no_weight_decay(self):
return {k for k, _ in self.named_parameters() if 'attention_biases' in k}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=[(r'^stages\.(\d+)', None), (r'^norm', (99999,))]
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
self.global_pool = global_pool
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def forward_intermediates(
self,
x: torch.Tensor,
indices: Optional[Union[int, List[int], Tuple[int]]] = None,
norm: bool = False,
stop_early: bool = False,
output_fmt: str = 'NCHW',
intermediates_only: bool = False,
) -> Union[List[torch.Tensor], Tuple[torch.Tensor, List[torch.Tensor]]]:
""" Forward features that returns intermediates.
Args:
x: Input image tensor
indices: Take last n blocks if int, all if None, select matching indices if sequence
norm: Apply norm layer to compatible intermediates
stop_early: Stop iterating over blocks when last desired intermediate hit
output_fmt: Shape of intermediate feature outputs
intermediates_only: Only return intermediate features
        Returns:
            List of intermediate features if `intermediates_only` is True, otherwise a tuple of
            (final features, list of intermediate features).
        """
assert output_fmt in ('NCHW',), 'Output shape must be NCHW.'
intermediates = []
take_indices, max_index = feature_take_indices(len(self.stages), indices)
# forward pass
x = self.stem(x)
B, C, H, W = x.shape
last_idx = self.num_stages - 1
if torch.jit.is_scripting() or not stop_early: # can't slice blocks in torchscript
stages = self.stages
else:
stages = self.stages[:max_index + 1]
feat_idx = 0
for feat_idx, stage in enumerate(stages):
x = stage(x)
if feat_idx < last_idx:
B, C, H, W = x.shape
if feat_idx in take_indices:
if feat_idx == last_idx:
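                    # last-stage tokens are flattened (B, N, C); reshape back to NCHW using the
                    # spatial size after the final downsample (half the previous stage's H, W
                    # in the default configs)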
x_inter = self.norm(x) if norm else x
intermediates.append(x_inter.reshape(B, H // 2, W // 2, -1).permute(0, 3, 1, 2))
else:
intermediates.append(x)
if intermediates_only:
return intermediates
if feat_idx == last_idx:
x = self.norm(x)
return x, intermediates
def prune_intermediate_layers(
self,
indices: Union[int, List[int], Tuple[int]] = 1,
prune_norm: bool = False,
prune_head: bool = True,
):
""" Prune layers not required for specified intermediates.
"""
take_indices, max_index = feature_take_indices(len(self.stages), indices)
self.stages = self.stages[:max_index + 1] # truncate blocks w/ stem as idx 0
if prune_norm:
self.norm = nn.Identity()
if prune_head:
self.reset_classifier(0, '')
return take_indices
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool == 'avg':
x = x.mean(dim=1)
x = self.head_drop(x)
if pre_logits:
return x
x, x_dist = self.head(x), self.head_dist(x)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train / finetune and at inference, average the classifier and
            # distillation head predictions
return (x + x_dist) / 2
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stem.0.weight' in state_dict:
return state_dict # non-original checkpoint, no remapping needed
out_dict = {}
import re
stage_idx = 0
for k, v in state_dict.items():
if k.startswith('patch_embed'):
k = k.replace('patch_embed.0', 'stem.conv1')
k = k.replace('patch_embed.1', 'stem.norm1')
k = k.replace('patch_embed.3', 'stem.conv2')
k = k.replace('patch_embed.4', 'stem.norm2')
if re.match(r'network\.(\d+)\.proj\.weight', k):
stage_idx += 1
k = re.sub(r'network.(\d+).(\d+)', f'stages.{stage_idx}.blocks.\\2', k)
k = re.sub(r'network.(\d+).proj', f'stages.{stage_idx}.downsample.conv', k)
k = re.sub(r'network.(\d+).norm', f'stages.{stage_idx}.downsample.norm', k)
k = re.sub(r'layer_scale_([0-9])', r'ls\1.gamma', k)
k = k.replace('dist_head', 'head_dist')
out_dict[k] = v
return out_dict
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'fixed_input_size': True,
'crop_pct': .95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': ('head', 'head_dist'),
**kwargs
}
default_cfgs = generate_default_cfgs({
'efficientformer_l1.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l3.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
'efficientformer_l7.snap_dist_in1k': _cfg(
hf_hub_id='timm/',
),
})
def _create_efficientformer(variant, pretrained=False, **kwargs):
out_indices = kwargs.pop('out_indices', 4)
model = build_model_with_cfg(
EfficientFormer, variant, pretrained,
pretrained_filter_fn=_checkpoint_filter_fn,
feature_cfg=dict(out_indices=out_indices, feature_cls='getter'),
**kwargs,
)
return model
@register_model
def efficientformer_l1(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l1'],
embed_dims=EfficientFormer_width['l1'],
num_vit=1,
)
return _create_efficientformer('efficientformer_l1', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l3(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l3'],
embed_dims=EfficientFormer_width['l3'],
num_vit=4,
)
return _create_efficientformer('efficientformer_l3', pretrained=pretrained, **dict(model_args, **kwargs))
@register_model
def efficientformer_l7(pretrained=False, **kwargs) -> EfficientFormer:
model_args = dict(
depths=EfficientFormer_depth['l7'],
embed_dims=EfficientFormer_width['l7'],
num_vit=8,
)
return _create_efficientformer('efficientformer_l7', pretrained=pretrained, **dict(model_args, **kwargs))
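# Usage sketch (assumes the timm package is installed and the entrypoints above are registered):
#   import timm
#   model = timm.create_model('efficientformer_l1', pretrained=False)
#   model.set_distilled_training(True)  # return separate class/distill logits while training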
| pytorch-image-models/timm/models/efficientformer.py/0 | {
"file_path": "pytorch-image-models/timm/models/efficientformer.py",
"repo_id": "pytorch-image-models",
"token_count": 10897
} | 368 |
""" HRNet
Copied from https://github.com/HRNet/HRNet-Image-Classification
Original header:
Copyright (c) Microsoft
Licensed under the MIT License.
Written by Bin Xiao (Bin.Xiao@microsoft.com)
Modified by Ke Sun (sunk@mail.ustc.edu.cn)
"""
import logging
from typing import List
import torch
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import create_classifier
from ._builder import build_model_with_cfg, pretrained_cfg_for_features
from ._features import FeatureInfo
from ._registry import register_model, generate_default_cfgs
from .resnet import BasicBlock, Bottleneck # leveraging ResNet block_types w/ additional features like SE
__all__ = ['HighResolutionNet', 'HighResolutionNetFeatures'] # model_registry will add each entrypoint fn to this
_BN_MOMENTUM = 0.1
_logger = logging.getLogger(__name__)
cfg_cls = dict(
hrnet_w18_small=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(1,),
num_channels=(32,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(16, 32),
fuse_method='SUM'
),
stage3=dict(
num_modules=1,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(16, 32, 64),
fuse_method='SUM'
),
stage4=dict(
num_modules=1,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(16, 32, 64, 128),
fuse_method='SUM',
),
),
hrnet_w18_small_v2=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(2,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(2, 2),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=3,
num_branches=3,
block_type='BASIC',
num_blocks=(2, 2, 2),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=2,
num_branches=4,
block_type='BASIC',
num_blocks=(2, 2, 2, 2),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w18=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(18, 36),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(18, 36, 72),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(18, 36, 72, 144),
fuse_method='SUM',
),
),
hrnet_w30=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(30, 60),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(30, 60, 120),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(30, 60, 120, 240),
fuse_method='SUM',
),
),
hrnet_w32=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256),
fuse_method='SUM',
),
),
hrnet_w40=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(40, 80),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(40, 80, 160),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(40, 80, 160, 320),
fuse_method='SUM',
),
),
hrnet_w44=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(44, 88),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(44, 88, 176),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(44, 88, 176, 352),
fuse_method='SUM',
),
),
hrnet_w48=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(48, 96),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(48, 96, 192),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(48, 96, 192, 384),
fuse_method='SUM',
),
),
hrnet_w64=dict(
stem_width=64,
stage1=dict(
num_modules=1,
num_branches=1,
block_type='BOTTLENECK',
num_blocks=(4,),
num_channels=(64,),
fuse_method='SUM',
),
stage2=dict(
num_modules=1,
num_branches=2,
block_type='BASIC',
num_blocks=(4, 4),
num_channels=(64, 128),
fuse_method='SUM'
),
stage3=dict(
num_modules=4,
num_branches=3,
block_type='BASIC',
num_blocks=(4, 4, 4),
num_channels=(64, 128, 256),
fuse_method='SUM'
),
stage4=dict(
num_modules=3,
num_branches=4,
block_type='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(64, 128, 256, 512),
fuse_method='SUM',
),
)
)
class HighResolutionModule(nn.Module):
def __init__(
self,
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
fuse_method,
multi_scale_output=True,
):
super(HighResolutionModule, self).__init__()
self._check_branches(
num_branches,
block_types,
num_blocks,
num_in_chs,
num_channels,
)
self.num_in_chs = num_in_chs
self.fuse_method = fuse_method
self.num_branches = num_branches
self.multi_scale_output = multi_scale_output
self.branches = self._make_branches(
num_branches,
block_types,
num_blocks,
num_channels,
)
self.fuse_layers = self._make_fuse_layers()
self.fuse_act = nn.ReLU(False)
def _check_branches(self, num_branches, block_types, num_blocks, num_in_chs, num_channels):
error_msg = ''
if num_branches != len(num_blocks):
error_msg = 'num_branches({}) <> num_blocks({})'.format(num_branches, len(num_blocks))
elif num_branches != len(num_channels):
error_msg = 'num_branches({}) <> num_channels({})'.format(num_branches, len(num_channels))
elif num_branches != len(num_in_chs):
error_msg = 'num_branches({}) <> num_in_chs({})'.format(num_branches, len(num_in_chs))
if error_msg:
_logger.error(error_msg)
raise ValueError(error_msg)
def _make_one_branch(self, branch_index, block_type, num_blocks, num_channels, stride=1):
downsample = None
if stride != 1 or self.num_in_chs[branch_index] != num_channels[branch_index] * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.num_in_chs[branch_index], num_channels[branch_index] * block_type.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(num_channels[branch_index] * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(self.num_in_chs[branch_index], num_channels[branch_index], stride, downsample)]
self.num_in_chs[branch_index] = num_channels[branch_index] * block_type.expansion
for i in range(1, num_blocks[branch_index]):
layers.append(block_type(self.num_in_chs[branch_index], num_channels[branch_index]))
return nn.Sequential(*layers)
def _make_branches(self, num_branches, block_type, num_blocks, num_channels):
branches = []
for i in range(num_branches):
branches.append(self._make_one_branch(i, block_type, num_blocks, num_channels))
return nn.ModuleList(branches)
def _make_fuse_layers(self):
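        # Build, for each output branch i, a list of modules that map every input branch j to
        # branch i's resolution and channel count: 1x1 conv + nearest upsample when j > i,
        # identity when j == i, and a stack of stride-2 3x3 convs when j < i.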
if self.num_branches == 1:
return nn.Identity()
num_branches = self.num_branches
num_in_chs = self.num_in_chs
fuse_layers = []
for i in range(num_branches if self.multi_scale_output else 1):
fuse_layer = []
for j in range(num_branches):
if j > i:
fuse_layer.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_in_chs[i], 1, 1, 0, bias=False),
nn.BatchNorm2d(num_in_chs[i], momentum=_BN_MOMENTUM),
nn.Upsample(scale_factor=2 ** (j - i), mode='nearest')))
elif j == i:
fuse_layer.append(nn.Identity())
else:
conv3x3s = []
for k in range(i - j):
if k == i - j - 1:
num_out_chs_conv3x3 = num_in_chs[i]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM)
))
else:
num_out_chs_conv3x3 = num_in_chs[j]
conv3x3s.append(nn.Sequential(
nn.Conv2d(num_in_chs[j], num_out_chs_conv3x3, 3, 2, 1, bias=False),
nn.BatchNorm2d(num_out_chs_conv3x3, momentum=_BN_MOMENTUM),
nn.ReLU(False)
))
fuse_layer.append(nn.Sequential(*conv3x3s))
fuse_layers.append(nn.ModuleList(fuse_layer))
return nn.ModuleList(fuse_layers)
def get_num_in_chs(self):
return self.num_in_chs
def forward(self, x: List[torch.Tensor]) -> List[torch.Tensor]:
if self.num_branches == 1:
return [self.branches[0](x[0])]
for i, branch in enumerate(self.branches):
x[i] = branch(x[i])
x_fuse = []
for i, fuse_outer in enumerate(self.fuse_layers):
y = None
for j, f in enumerate(fuse_outer):
if y is None:
y = f(x[j])
else:
y = y + f(x[j])
x_fuse.append(self.fuse_act(y))
return x_fuse
class SequentialList(nn.Sequential):
def __init__(self, *args):
super(SequentialList, self).__init__(*args)
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (List[torch.Tensor]) -> (List[torch.Tensor])
pass
@torch.jit._overload_method # noqa: F811
def forward(self, x):
# type: (torch.Tensor) -> (List[torch.Tensor])
pass
def forward(self, x) -> List[torch.Tensor]:
for module in self:
x = module(x)
return x
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor: # `input` has a same name in Sequential forward
pass
block_types_dict = {
'BASIC': BasicBlock,
'BOTTLENECK': Bottleneck
}
class HighResolutionNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
head='classification',
**kwargs,
):
super(HighResolutionNet, self).__init__()
self.num_classes = num_classes
assert output_stride == 32 # FIXME support dilation
cfg.update(**kwargs)
stem_width = cfg['stem_width']
self.conv1 = nn.Conv2d(in_chans, stem_width, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(stem_width, momentum=_BN_MOMENTUM)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(stem_width, 64, kernel_size=3, stride=2, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(64, momentum=_BN_MOMENTUM)
self.act2 = nn.ReLU(inplace=True)
self.stage1_cfg = cfg['stage1']
num_channels = self.stage1_cfg['num_channels'][0]
block_type = block_types_dict[self.stage1_cfg['block_type']]
num_blocks = self.stage1_cfg['num_blocks'][0]
self.layer1 = self._make_layer(block_type, 64, num_channels, num_blocks)
stage1_out_channel = block_type.expansion * num_channels
self.stage2_cfg = cfg['stage2']
num_channels = self.stage2_cfg['num_channels']
block_type = block_types_dict[self.stage2_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition1 = self._make_transition_layer([stage1_out_channel], num_channels)
self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
self.stage3_cfg = cfg['stage3']
num_channels = self.stage3_cfg['num_channels']
block_type = block_types_dict[self.stage3_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
self.stage4_cfg = cfg['stage4']
num_channels = self.stage4_cfg['num_channels']
block_type = block_types_dict[self.stage4_cfg['block_type']]
num_channels = [num_channels[i] * block_type.expansion for i in range(len(num_channels))]
self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
self.stage4, pre_stage_channels = self._make_stage(self.stage4_cfg, num_channels, multi_scale_output=True)
self.head = head
self.head_channels = None # set if _make_head called
head_conv_bias = cfg.pop('head_conv_bias', True)
if head == 'classification':
# Classification Head
self.num_features = 2048
self.incre_modules, self.downsamp_modules, self.final_layer = self._make_head(
pre_stage_channels,
conv_bias=head_conv_bias,
)
self.global_pool, self.head_drop, self.classifier = create_classifier(
self.num_features,
self.num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
)
else:
if head == 'incre':
self.num_features = 2048
self.incre_modules, _, _ = self._make_head(pre_stage_channels, incre_only=True)
else:
self.num_features = 256
self.incre_modules = None
self.global_pool = nn.Identity()
self.head_drop = nn.Identity()
self.classifier = nn.Identity()
curr_stride = 2
# module names aren't actually valid here, hook or FeatureNet based extraction would not work
self.feature_info = [dict(num_chs=64, reduction=curr_stride, module='stem')]
for i, c in enumerate(self.head_channels if self.head_channels else num_channels):
curr_stride *= 2
c = c * 4 if self.head_channels else c # head block_type expansion factor of 4
self.feature_info += [dict(num_chs=c, reduction=curr_stride, module=f'stage{i + 1}')]
self.init_weights()
def _make_head(self, pre_stage_channels, incre_only=False, conv_bias=True):
head_block_type = Bottleneck
self.head_channels = [32, 64, 128, 256]
# Increasing the #channels on each resolution
# from C, 2C, 4C, 8C to 128, 256, 512, 1024
incre_modules = []
for i, channels in enumerate(pre_stage_channels):
incre_modules.append(self._make_layer(head_block_type, channels, self.head_channels[i], 1, stride=1))
incre_modules = nn.ModuleList(incre_modules)
if incre_only:
return incre_modules, None, None
# downsampling modules
downsamp_modules = []
for i in range(len(pre_stage_channels) - 1):
in_channels = self.head_channels[i] * head_block_type.expansion
out_channels = self.head_channels[i + 1] * head_block_type.expansion
downsamp_module = nn.Sequential(
nn.Conv2d(
in_channels=in_channels, out_channels=out_channels,
kernel_size=3, stride=2, padding=1, bias=conv_bias),
nn.BatchNorm2d(out_channels, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
downsamp_modules.append(downsamp_module)
downsamp_modules = nn.ModuleList(downsamp_modules)
final_layer = nn.Sequential(
nn.Conv2d(
in_channels=self.head_channels[3] * head_block_type.expansion, out_channels=self.num_features,
kernel_size=1, stride=1, padding=0, bias=conv_bias),
nn.BatchNorm2d(self.num_features, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)
)
return incre_modules, downsamp_modules, final_layer
def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
num_branches_cur = len(num_channels_cur_layer)
num_branches_pre = len(num_channels_pre_layer)
transition_layers = []
for i in range(num_branches_cur):
if i < num_branches_pre:
if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
transition_layers.append(nn.Sequential(
nn.Conv2d(num_channels_pre_layer[i], num_channels_cur_layer[i], 3, 1, 1, bias=False),
nn.BatchNorm2d(num_channels_cur_layer[i], momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
else:
transition_layers.append(nn.Identity())
else:
conv3x3s = []
for j in range(i + 1 - num_branches_pre):
_in_chs = num_channels_pre_layer[-1]
_out_chs = num_channels_cur_layer[i] if j == i - num_branches_pre else _in_chs
conv3x3s.append(nn.Sequential(
nn.Conv2d(_in_chs, _out_chs, 3, 2, 1, bias=False),
nn.BatchNorm2d(_out_chs, momentum=_BN_MOMENTUM),
nn.ReLU(inplace=True)))
transition_layers.append(nn.Sequential(*conv3x3s))
return nn.ModuleList(transition_layers)
def _make_layer(self, block_type, inplanes, planes, block_types, stride=1):
downsample = None
if stride != 1 or inplanes != planes * block_type.expansion:
downsample = nn.Sequential(
nn.Conv2d(inplanes, planes * block_type.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block_type.expansion, momentum=_BN_MOMENTUM),
)
layers = [block_type(inplanes, planes, stride, downsample)]
inplanes = planes * block_type.expansion
for i in range(1, block_types):
layers.append(block_type(inplanes, planes))
return nn.Sequential(*layers)
def _make_stage(self, layer_config, num_in_chs, multi_scale_output=True):
num_modules = layer_config['num_modules']
num_branches = layer_config['num_branches']
num_blocks = layer_config['num_blocks']
num_channels = layer_config['num_channels']
block_type = block_types_dict[layer_config['block_type']]
fuse_method = layer_config['fuse_method']
modules = []
for i in range(num_modules):
            # multi_scale_output is only used by the last module
reset_multi_scale_output = multi_scale_output or i < num_modules - 1
modules.append(HighResolutionModule(
num_branches, block_type, num_blocks, num_in_chs, num_channels, fuse_method, reset_multi_scale_output)
)
num_in_chs = modules[-1].get_num_in_chs()
return SequentialList(*modules), num_in_chs
@torch.jit.ignore
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^conv[12]|bn[12]',
block_types=r'^(?:layer|stage|transition)(\d+)' if coarse else [
(r'^layer(\d+)\.(\d+)', None),
(r'^stage(\d+)\.(\d+)', None),
(r'^transition(\d+)', (99999,)),
],
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, "gradient checkpointing not supported"
@torch.jit.ignore
def get_classifier(self):
return self.classifier
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.classifier = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def stages(self, x) -> List[torch.Tensor]:
x = self.layer1(x)
xl = [t(x) for i, t in enumerate(self.transition1)]
yl = self.stage2(xl)
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition2)]
yl = self.stage3(xl)
xl = [t(yl[-1]) if not isinstance(t, nn.Identity) else yl[i] for i, t in enumerate(self.transition3)]
yl = self.stage4(xl)
return yl
def forward_features(self, x):
# Stem
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
# Stages
yl = self.stages(x)
if self.incre_modules is None or self.downsamp_modules is None:
return yl
y = None
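        # classification-head fusion: widen each branch with its incre module, then repeatedly
        # downsample the running feature and add the next (lower-resolution) branch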
for i, incre in enumerate(self.incre_modules):
if y is None:
y = incre(yl[i])
else:
down: ModuleInterface = self.downsamp_modules[i - 1] # needed for torchscript module indexing
y = incre(yl[i]) + down.forward(y)
y = self.final_layer(y)
return y
def forward_head(self, x, pre_logits: bool = False):
# Classification Head
x = self.global_pool(x)
x = self.head_drop(x)
return x if pre_logits else self.classifier(x)
def forward(self, x):
y = self.forward_features(x)
x = self.forward_head(y)
return x
class HighResolutionNetFeatures(HighResolutionNet):
"""HighResolutionNet feature extraction
The design of HRNet makes it easy to grab feature maps, this class provides a simple wrapper to do so.
It would be more complicated to use the FeatureNet helpers.
The `feature_location=incre` allows grabbing increased channel count features using part of the
classification head. If `feature_location=''` the default HRNet features are returned. First stem
conv is used for stride 2 features.
"""
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
output_stride=32,
global_pool='avg',
drop_rate=0.0,
feature_location='incre',
out_indices=(0, 1, 2, 3, 4),
**kwargs,
):
assert feature_location in ('incre', '')
super(HighResolutionNetFeatures, self).__init__(
cfg,
in_chans=in_chans,
num_classes=num_classes,
output_stride=output_stride,
global_pool=global_pool,
drop_rate=drop_rate,
head=feature_location,
**kwargs,
)
self.feature_info = FeatureInfo(self.feature_info, out_indices)
self._out_idx = {f['index'] for f in self.feature_info.get_dicts()}
def forward_features(self, x):
assert False, 'Not supported'
    def forward(self, x) -> List[torch.Tensor]:
out = []
x = self.conv1(x)
x = self.bn1(x)
x = self.act1(x)
if 0 in self._out_idx:
out.append(x)
x = self.conv2(x)
x = self.bn2(x)
x = self.act2(x)
x = self.stages(x)
if self.incre_modules is not None:
x = [incre(f) for f, incre in zip(x, self.incre_modules)]
for i, f in enumerate(x):
if i + 1 in self._out_idx:
out.append(f)
return out
def _create_hrnet(variant, pretrained=False, cfg_variant=None, **model_kwargs):
model_cls = HighResolutionNet
features_only = False
kwargs_filter = None
if model_kwargs.pop('features_only', False):
model_cls = HighResolutionNetFeatures
kwargs_filter = ('num_classes', 'global_pool')
features_only = True
cfg_variant = cfg_variant or variant
pretrained_strict = model_kwargs.pop(
'pretrained_strict',
not features_only and model_kwargs.get('head', 'classification') == 'classification'
)
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
model_cfg=cfg_cls[cfg_variant],
pretrained_strict=pretrained_strict,
kwargs_filter=kwargs_filter,
**model_kwargs,
)
if features_only:
model.pretrained_cfg = pretrained_cfg_for_features(model.default_cfg)
model.default_cfg = model.pretrained_cfg # backwards compat
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'classifier',
**kwargs
}
default_cfgs = generate_default_cfgs({
'hrnet_w18_small.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_small_v2.gluon_in1k': _cfg(hf_hub_id='timm/', interpolation='bicubic'),
'hrnet_w18_small_v2.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18.ms_aug_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95,
),
'hrnet_w18.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w30.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w32.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w40.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w44.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w48.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w64.ms_in1k': _cfg(hf_hub_id='timm/'),
'hrnet_w18_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
'hrnet_w48_ssld.paddle_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288)
),
})
@register_model
def hrnet_w18_small(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small', pretrained, **kwargs)
@register_model
def hrnet_w18_small_v2(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18_small_v2', pretrained, **kwargs)
@register_model
def hrnet_w18(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w18', pretrained, **kwargs)
@register_model
def hrnet_w30(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w30', pretrained, **kwargs)
@register_model
def hrnet_w32(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w32', pretrained, **kwargs)
@register_model
def hrnet_w40(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w40', pretrained, **kwargs)
@register_model
def hrnet_w44(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w44', pretrained, **kwargs)
@register_model
def hrnet_w48(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w48', pretrained, **kwargs)
@register_model
def hrnet_w64(pretrained=False, **kwargs) -> HighResolutionNet:
return _create_hrnet('hrnet_w64', pretrained, **kwargs)
@register_model
def hrnet_w18_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w18_ssld', cfg_variant='hrnet_w18', pretrained=pretrained, **kwargs)
@register_model
def hrnet_w48_ssld(pretrained=False, **kwargs) -> HighResolutionNet:
kwargs.setdefault('head_conv_bias', False)
return _create_hrnet('hrnet_w48_ssld', cfg_variant='hrnet_w48', pretrained=pretrained, **kwargs)
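# Usage sketch (assumes the timm package is installed and the entrypoints above are registered):
#   import timm
#   model = timm.create_model('hrnet_w18', pretrained=False)
#   feats = timm.create_model('hrnet_w18', features_only=True)  # HighResolutionNetFeatures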
| pytorch-image-models/timm/models/hrnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/hrnet.py",
"repo_id": "pytorch-image-models",
"token_count": 17653
} | 369 |
""" Next-ViT
As described in https://arxiv.org/abs/2207.05501
Next-ViT model defs and weights adapted from https://github.com/bytedance/Next-ViT, original copyright below
"""
# Copyright (c) ByteDance Inc. All rights reserved.
from functools import partial
import torch
import torch.nn.functional as F
from torch import nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, trunc_normal_, ConvMlp, get_norm_layer, get_act_layer, use_fused_attn
from timm.layers import ClassifierHead
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
def merge_pre_bn(module, pre_bn_1, pre_bn_2=None):
""" Merge pre BN to reduce inference runtime.
"""
weight = module.weight.data
if module.bias is None:
        zeros = torch.zeros(weight.shape[0], device=weight.device).type(weight.type())
module.bias = nn.Parameter(zeros)
bias = module.bias.data
if pre_bn_2 is None:
assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False"
assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False"
scale_invstd = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5)
extra_weight = scale_invstd * pre_bn_1.weight
extra_bias = pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd
else:
assert pre_bn_1.track_running_stats is True, "Unsupported bn_module.track_running_stats is False"
assert pre_bn_1.affine is True, "Unsupported bn_module.affine is False"
assert pre_bn_2.track_running_stats is True, "Unsupported bn_module.track_running_stats is False"
assert pre_bn_2.affine is True, "Unsupported bn_module.affine is False"
scale_invstd_1 = pre_bn_1.running_var.add(pre_bn_1.eps).pow(-0.5)
scale_invstd_2 = pre_bn_2.running_var.add(pre_bn_2.eps).pow(-0.5)
extra_weight = scale_invstd_1 * pre_bn_1.weight * scale_invstd_2 * pre_bn_2.weight
extra_bias = (
scale_invstd_2 * pre_bn_2.weight
* (pre_bn_1.bias - pre_bn_1.weight * pre_bn_1.running_mean * scale_invstd_1 - pre_bn_2.running_mean)
+ pre_bn_2.bias
)
if isinstance(module, nn.Linear):
extra_bias = weight @ extra_bias
weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight))
elif isinstance(module, nn.Conv2d):
assert weight.shape[2] == 1 and weight.shape[3] == 1
weight = weight.reshape(weight.shape[0], weight.shape[1])
extra_bias = weight @ extra_bias
weight.mul_(extra_weight.view(1, weight.size(1)).expand_as(weight))
weight = weight.reshape(weight.shape[0], weight.shape[1], 1, 1)
bias.add_(extra_bias)
module.weight.data = weight
module.bias.data = bias
class ConvNormAct(nn.Module):
def __init__(
self,
in_chs,
out_chs,
kernel_size=3,
stride=1,
groups=1,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(ConvNormAct, self).__init__()
self.conv = nn.Conv2d(
in_chs, out_chs, kernel_size=kernel_size, stride=stride,
padding=1, groups=groups, bias=False)
self.norm = norm_layer(out_chs)
self.act = act_layer()
def forward(self, x):
x = self.conv(x)
x = self.norm(x)
x = self.act(x)
return x
def _make_divisible(v, divisor, min_value=None):
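    # Round `v` to the nearest multiple of `divisor`, never dropping below `min_value`
    # (hardware-friendly channel counts); the 10% guard below avoids shrinking too far.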
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class PatchEmbed(nn.Module):
def __init__(self,
in_chs,
out_chs,
stride=1,
norm_layer = nn.BatchNorm2d,
):
super(PatchEmbed, self).__init__()
if stride == 2:
self.pool = nn.AvgPool2d((2, 2), stride=2, ceil_mode=True, count_include_pad=False)
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False)
self.norm = norm_layer(out_chs)
elif in_chs != out_chs:
self.pool = nn.Identity()
self.conv = nn.Conv2d(in_chs, out_chs, kernel_size=1, stride=1, bias=False)
self.norm = norm_layer(out_chs)
else:
self.pool = nn.Identity()
self.conv = nn.Identity()
self.norm = nn.Identity()
def forward(self, x):
return self.norm(self.conv(self.pool(x)))
class ConvAttention(nn.Module):
"""
Multi-Head Convolutional Attention
"""
def __init__(self, out_chs, head_dim, norm_layer = nn.BatchNorm2d, act_layer = nn.ReLU):
super(ConvAttention, self).__init__()
self.group_conv3x3 = nn.Conv2d(
out_chs, out_chs,
kernel_size=3, stride=1, padding=1, groups=out_chs // head_dim, bias=False
)
self.norm = norm_layer(out_chs)
self.act = act_layer()
self.projection = nn.Conv2d(out_chs, out_chs, kernel_size=1, bias=False)
def forward(self, x):
out = self.group_conv3x3(x)
out = self.norm(out)
out = self.act(out)
out = self.projection(out)
return out
class NextConvBlock(nn.Module):
"""
Next Convolution Block
"""
def __init__(
self,
in_chs,
out_chs,
stride=1,
drop_path=0.,
drop=0.,
head_dim=32,
mlp_ratio=3.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU
):
super(NextConvBlock, self).__init__()
self.in_chs = in_chs
self.out_chs = out_chs
assert out_chs % head_dim == 0
self.patch_embed = PatchEmbed(in_chs, out_chs, stride, norm_layer=norm_layer)
self.mhca = ConvAttention(
out_chs,
head_dim,
norm_layer=norm_layer,
act_layer=act_layer,
)
self.attn_drop_path = DropPath(drop_path)
self.norm = norm_layer(out_chs)
self.mlp = ConvMlp(
out_chs,
hidden_features=int(out_chs * mlp_ratio),
drop=drop,
bias=True,
act_layer=act_layer,
)
self.mlp_drop_path = DropPath(drop_path)
self.is_fused = False
@torch.no_grad()
def reparameterize(self):
if not self.is_fused:
merge_pre_bn(self.mlp.fc1, self.norm)
self.norm = None
self.is_fused = True
def forward(self, x):
x = self.patch_embed(x)
x = x + self.attn_drop_path(self.mhca(x))
out = self.norm(x)
x = x + self.mlp_drop_path(self.mlp(out))
return x
class EfficientAttention(nn.Module):
"""
Efficient Multi-Head Self Attention
"""
fused_attn: torch.jit.Final[bool]
def __init__(
self,
dim,
out_dim=None,
head_dim=32,
qkv_bias=True,
attn_drop=0.,
proj_drop=0.,
sr_ratio=1,
norm_layer=nn.BatchNorm1d,
):
super().__init__()
self.dim = dim
self.out_dim = out_dim if out_dim is not None else dim
self.num_heads = self.dim // head_dim
self.head_dim = head_dim
self.scale = head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.q = nn.Linear(dim, self.dim, bias=qkv_bias)
self.k = nn.Linear(dim, self.dim, bias=qkv_bias)
self.v = nn.Linear(dim, self.dim, bias=qkv_bias)
self.proj = nn.Linear(self.dim, self.out_dim)
self.attn_drop = nn.Dropout(attn_drop)
self.proj_drop = nn.Dropout(proj_drop)
self.sr_ratio = sr_ratio
self.N_ratio = sr_ratio ** 2
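        # Spatial reduction: when sr_ratio > 1, key/value tokens are average-pooled by
        # sr_ratio**2 along the sequence, cutting attention cost from O(N^2) to O(N^2 / sr_ratio^2).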
if sr_ratio > 1:
self.sr = nn.AvgPool1d(kernel_size=self.N_ratio, stride=self.N_ratio)
self.norm = norm_layer(dim)
else:
self.sr = None
self.norm = None
def forward(self, x):
B, N, C = x.shape
q = self.q(x).reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
if self.sr is not None:
x = self.sr(x.transpose(1, 2))
x = self.norm(x).transpose(1, 2)
k = self.k(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2)
v = self.v(x).reshape(B, -1, self.num_heads, self.head_dim).transpose(1, 2)
if self.fused_attn:
x = F.scaled_dot_product_attention(
q, k, v,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-1, -2)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class NextTransformerBlock(nn.Module):
"""
Next Transformer Block
"""
def __init__(
self,
in_chs,
out_chs,
drop_path,
stride=1,
sr_ratio=1,
mlp_ratio=2,
head_dim=32,
mix_block_ratio=0.75,
attn_drop=0.,
drop=0.,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super(NextTransformerBlock, self).__init__()
self.in_chs = in_chs
self.out_chs = out_chs
self.mix_block_ratio = mix_block_ratio
self.mhsa_out_chs = _make_divisible(int(out_chs * mix_block_ratio), 32)
self.mhca_out_chs = out_chs - self.mhsa_out_chs
self.patch_embed = PatchEmbed(in_chs, self.mhsa_out_chs, stride)
self.norm1 = norm_layer(self.mhsa_out_chs)
self.e_mhsa = EfficientAttention(
self.mhsa_out_chs,
head_dim=head_dim,
sr_ratio=sr_ratio,
attn_drop=attn_drop,
proj_drop=drop,
)
self.mhsa_drop_path = DropPath(drop_path * mix_block_ratio)
self.projection = PatchEmbed(self.mhsa_out_chs, self.mhca_out_chs, stride=1, norm_layer=norm_layer)
self.mhca = ConvAttention(
self.mhca_out_chs,
head_dim=head_dim,
norm_layer=norm_layer,
act_layer=act_layer,
)
self.mhca_drop_path = DropPath(drop_path * (1 - mix_block_ratio))
self.norm2 = norm_layer(out_chs)
self.mlp = ConvMlp(
out_chs,
hidden_features=int(out_chs * mlp_ratio),
act_layer=act_layer,
drop=drop,
)
self.mlp_drop_path = DropPath(drop_path)
self.is_fused = False
@torch.no_grad()
def reparameterize(self):
if not self.is_fused:
merge_pre_bn(self.e_mhsa.q, self.norm1)
if self.e_mhsa.norm is not None:
merge_pre_bn(self.e_mhsa.k, self.norm1, self.e_mhsa.norm)
merge_pre_bn(self.e_mhsa.v, self.norm1, self.e_mhsa.norm)
self.e_mhsa.norm = nn.Identity()
else:
merge_pre_bn(self.e_mhsa.k, self.norm1)
merge_pre_bn(self.e_mhsa.v, self.norm1)
self.norm1 = nn.Identity()
merge_pre_bn(self.mlp.fc1, self.norm2)
self.norm2 = nn.Identity()
self.is_fused = True
def forward(self, x):
x = self.patch_embed(x)
B, C, H, W = x.shape
out = self.norm1(x)
out = out.reshape(B, C, -1).transpose(-1, -2)
out = self.mhsa_drop_path(self.e_mhsa(out))
x = x + out.transpose(-1, -2).reshape(B, C, H, W)
out = self.projection(x)
out = out + self.mhca_drop_path(self.mhca(out))
x = torch.cat([x, out], dim=1)
out = self.norm2(x)
x = x + self.mlp_drop_path(self.mlp(out))
return x
class NextStage(nn.Module):
def __init__(
self,
in_chs,
block_chs,
block_types,
stride=2,
sr_ratio=1,
mix_block_ratio=1.0,
drop=0.,
attn_drop=0.,
drop_path=0.,
head_dim=32,
norm_layer=nn.BatchNorm2d,
act_layer=nn.ReLU,
):
super().__init__()
self.grad_checkpointing = False
blocks = []
for block_idx, block_type in enumerate(block_types):
stride = stride if block_idx == 0 else 1
out_chs = block_chs[block_idx]
block_type = block_types[block_idx]
dpr = drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path
if block_type is NextConvBlock:
layer = NextConvBlock(
in_chs,
out_chs,
stride=stride,
drop_path=dpr,
drop=drop,
head_dim=head_dim,
norm_layer=norm_layer,
act_layer=act_layer,
)
blocks.append(layer)
elif block_type is NextTransformerBlock:
layer = NextTransformerBlock(
in_chs,
out_chs,
drop_path=dpr,
stride=stride,
sr_ratio=sr_ratio,
head_dim=head_dim,
mix_block_ratio=mix_block_ratio,
attn_drop=attn_drop,
drop=drop,
norm_layer=norm_layer,
act_layer=act_layer,
)
blocks.append(layer)
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
def forward(self, x):
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class NextViT(nn.Module):
def __init__(
self,
in_chans,
num_classes=1000,
global_pool='avg',
stem_chs=(64, 32, 64),
depths=(3, 4, 10, 3),
strides=(1, 2, 2, 2),
sr_ratios=(8, 4, 2, 1),
drop_path_rate=0.1,
attn_drop_rate=0.,
drop_rate=0.,
head_dim=32,
mix_block_ratio=0.75,
norm_layer=nn.BatchNorm2d,
act_layer=None,
):
super(NextViT, self).__init__()
self.grad_checkpointing = False
self.num_classes = num_classes
norm_layer = get_norm_layer(norm_layer)
if act_layer is None:
act_layer = partial(nn.ReLU, inplace=True)
else:
act_layer = get_act_layer(act_layer)
self.stage_out_chs = [
[96] * (depths[0]),
[192] * (depths[1] - 1) + [256],
[384, 384, 384, 384, 512] * (depths[2] // 5),
[768] * (depths[3] - 1) + [1024]
]
self.feature_info = [dict(
num_chs=sc[-1],
reduction=2**(i + 2),
module=f'stages.{i}'
) for i, sc in enumerate(self.stage_out_chs)]
# Next Hybrid Strategy
self.stage_block_types = [
[NextConvBlock] * depths[0],
[NextConvBlock] * (depths[1] - 1) + [NextTransformerBlock],
[NextConvBlock, NextConvBlock, NextConvBlock, NextConvBlock, NextTransformerBlock] * (depths[2] // 5),
[NextConvBlock] * (depths[3] - 1) + [NextTransformerBlock]]
self.stem = nn.Sequential(
ConvNormAct(in_chans, stem_chs[0], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer),
ConvNormAct(stem_chs[0], stem_chs[1], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer),
ConvNormAct(stem_chs[1], stem_chs[2], kernel_size=3, stride=1, norm_layer=norm_layer, act_layer=act_layer),
ConvNormAct(stem_chs[2], stem_chs[2], kernel_size=3, stride=2, norm_layer=norm_layer, act_layer=act_layer),
)
in_chs = out_chs = stem_chs[-1]
stages = []
idx = 0
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
for stage_idx in range(len(depths)):
stage = NextStage(
in_chs=in_chs,
block_chs=self.stage_out_chs[stage_idx],
block_types=self.stage_block_types[stage_idx],
stride=strides[stage_idx],
sr_ratio=sr_ratios[stage_idx],
mix_block_ratio=mix_block_ratio,
head_dim=head_dim,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[stage_idx],
norm_layer=norm_layer,
act_layer=act_layer,
)
in_chs = out_chs = self.stage_out_chs[stage_idx][-1]
stages += [stage]
idx += depths[stage_idx]
self.num_features = out_chs
self.stages = nn.Sequential(*stages)
self.norm = norm_layer(out_chs)
self.head = ClassifierHead(pool_type=global_pool, in_features=out_chs, num_classes=num_classes)
self.stage_out_idx = [sum(depths[:idx + 1]) - 1 for idx in range(len(depths))]
self._initialize_weights()
def _initialize_weights(self):
for n, m in self.named_modules():
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Conv2d):
trunc_normal_(m.weight, std=.02)
if hasattr(m, 'bias') and m.bias is not None:
nn.init.constant_(m.bias, 0)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)' if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^norm', (99999,)),
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
for stage in self.stages:
stage.set_grad_checkpointing(enable=enable)
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.stages, x)
else:
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'head.fc.weight' in state_dict:
return state_dict # non-original
D = model.state_dict()
out_dict = {}
# remap originals based on order
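    # NOTE: this is a purely positional remap, so it relies on the original and timm
    # state dicts enumerating tensors in the same order (shapes assumed to line up 1:1).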
for ka, kb, va, vb in zip(D.keys(), state_dict.keys(), D.values(), state_dict.values()):
out_dict[ka] = vb
return out_dict
def _create_nextvit(variant, pretrained=False, **kwargs):
default_out_indices = tuple(i for i, _ in enumerate(kwargs.get('depths', (1, 1, 3, 1))))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
NextViT,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.95, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'nextvit_small.bd_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_base.bd_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_large.bd_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_small.bd_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'nextvit_base.bd_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'nextvit_large.bd_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'nextvit_small.bd_ssld_6m_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_base.bd_ssld_6m_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_large.bd_ssld_6m_in1k': _cfg(
hf_hub_id='timm/',
),
'nextvit_small.bd_ssld_6m_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'nextvit_base.bd_ssld_6m_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
'nextvit_large.bd_ssld_6m_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0,
),
})
@register_model
def nextvit_small(pretrained=False, **kwargs):
model_args = dict(depths=(3, 4, 10, 3), drop_path_rate=0.1)
model = _create_nextvit(
'nextvit_small', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def nextvit_base(pretrained=False, **kwargs):
model_args = dict(depths=(3, 4, 20, 3), drop_path_rate=0.2)
model = _create_nextvit(
'nextvit_base', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def nextvit_large(pretrained=False, **kwargs):
model_args = dict(depths=(3, 4, 30, 3), drop_path_rate=0.2)
model = _create_nextvit(
'nextvit_large', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| pytorch-image-models/timm/models/nextvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/nextvit.py",
"repo_id": "pytorch-image-models",
"token_count": 12168
} | 370 |
""" Sequencer
Paper: `Sequencer: Deep LSTM for Image Classification` - https://arxiv.org/pdf/2205.01972.pdf
"""
# Copyright (c) 2022. Yuki Tatsunami
# Licensed under the Apache License, Version 2.0 (the "License");
import math
from functools import partial
from itertools import accumulate
from typing import Tuple
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from timm.layers import lecun_normal_, DropPath, Mlp, PatchEmbed, ClassifierHead
from ._builder import build_model_with_cfg
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ['Sequencer2d'] # model_registry will add each entrypoint fn to this
def _init_weights(module: nn.Module, name: str, head_bias: float = 0., flax=False):
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.RNN, nn.GRU, nn.LSTM)):
stdv = 1.0 / math.sqrt(module.hidden_size)
for weight in module.parameters():
nn.init.uniform_(weight, -stdv, stdv)
elif hasattr(module, 'init_weights'):
module.init_weights()
class RNNIdentity(nn.Module):
def __init__(self, *args, **kwargs):
super(RNNIdentity, self).__init__()
def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, None]:
return x, None
class RNN2dBase(nn.Module):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.output_size = 2 * hidden_size if bidirectional else hidden_size
self.union = union
self.with_vertical = True
self.with_horizontal = True
self.with_fc = with_fc
self.fc = None
if with_fc:
if union == "cat":
self.fc = nn.Linear(2 * self.output_size, input_size)
elif union == "add":
self.fc = nn.Linear(self.output_size, input_size)
elif union == "vertical":
self.fc = nn.Linear(self.output_size, input_size)
self.with_horizontal = False
elif union == "horizontal":
self.fc = nn.Linear(self.output_size, input_size)
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
elif union == "cat":
pass
if 2 * self.output_size != input_size:
raise ValueError(f"The output channel {2 * self.output_size} is different from the input channel {input_size}.")
elif union == "add":
pass
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
elif union == "vertical":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_horizontal = False
elif union == "horizontal":
if self.output_size != input_size:
raise ValueError(f"The output channel {self.output_size} is different from the input channel {input_size}.")
self.with_vertical = False
else:
raise ValueError("Unrecognized union: " + union)
self.rnn_v = RNNIdentity()
self.rnn_h = RNNIdentity()
def forward(self, x):
B, H, W, C = x.shape
if self.with_vertical:
v = x.permute(0, 2, 1, 3)
v = v.reshape(-1, H, C)
v, _ = self.rnn_v(v)
v = v.reshape(B, W, H, -1)
v = v.permute(0, 2, 1, 3)
else:
v = None
if self.with_horizontal:
h = x.reshape(-1, W, C)
h, _ = self.rnn_h(h)
h = h.reshape(B, H, W, -1)
else:
h = None
if v is not None and h is not None:
if self.union == "cat":
x = torch.cat([v, h], dim=-1)
else:
x = v + h
elif v is not None:
x = v
elif h is not None:
x = h
if self.fc is not None:
x = self.fc(x)
return x
class LSTM2d(RNN2dBase):
def __init__(
self,
input_size: int,
hidden_size: int,
num_layers: int = 1,
bias: bool = True,
bidirectional: bool = True,
union="cat",
with_fc=True,
):
super().__init__(input_size, hidden_size, num_layers, bias, bidirectional, union, with_fc)
if self.with_vertical:
self.rnn_v = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
if self.with_horizontal:
self.rnn_h = nn.LSTM(
input_size,
hidden_size,
num_layers,
batch_first=True,
bias=bias,
bidirectional=bidirectional,
)
class Sequencer2dBlock(nn.Module):
def __init__(
self,
dim,
hidden_size,
mlp_ratio=3.0,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
channels_dim = int(mlp_ratio * dim)
self.norm1 = norm_layer(dim)
self.rnn_tokens = rnn_layer(
dim,
hidden_size,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)
def forward(self, x):
x = x + self.drop_path(self.rnn_tokens(self.norm1(x)))
x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
return x
class Shuffle(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if self.training:
B, H, W, C = x.shape
r = torch.randperm(H * W)
x = x.reshape(B, -1, C)
x = x[:, r, :].reshape(B, H, W, -1)
return x
class Downsample2d(nn.Module):
def __init__(self, input_dim, output_dim, patch_size):
super().__init__()
self.down = nn.Conv2d(input_dim, output_dim, kernel_size=patch_size, stride=patch_size)
def forward(self, x):
x = x.permute(0, 3, 1, 2)
x = self.down(x)
x = x.permute(0, 2, 3, 1)
return x
class Sequencer2dStage(nn.Module):
def __init__(
self,
dim,
dim_out,
depth,
patch_size,
hidden_size,
mlp_ratio,
downsample=False,
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop=0.,
drop_path=0.,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(dim, dim_out, patch_size)
else:
assert dim == dim_out
self.downsample = nn.Identity()
blocks = []
for block_idx in range(depth):
blocks.append(block_layer(
dim_out,
hidden_size,
mlp_ratio=mlp_ratio,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop,
drop_path=drop_path[block_idx] if isinstance(drop_path, (list, tuple)) else drop_path,
))
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
x = self.downsample(x)
x = self.blocks(x)
return x
class Sequencer2d(nn.Module):
def __init__(
self,
num_classes=1000,
img_size=224,
in_chans=3,
global_pool='avg',
layers=(4, 3, 8, 3),
patch_sizes=(7, 2, 2, 1),
embed_dims=(192, 384, 384, 384),
hidden_sizes=(48, 96, 96, 96),
mlp_ratios=(3.0, 3.0, 3.0, 3.0),
block_layer=Sequencer2dBlock,
rnn_layer=LSTM2d,
mlp_layer=Mlp,
norm_layer=partial(nn.LayerNorm, eps=1e-6),
act_layer=nn.GELU,
num_rnn_layers=1,
bidirectional=True,
union="cat",
with_fc=True,
drop_rate=0.,
drop_path_rate=0.,
nlhb=False,
stem_norm=False,
):
super().__init__()
assert global_pool in ('', 'avg')
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = embed_dims[-1] # num_features for consistency with other models
self.feature_dim = -1 # channel dim index for feature outputs (rank 4, NHWC)
self.output_fmt = 'NHWC'
self.feature_info = []
self.stem = PatchEmbed(
img_size=None,
patch_size=patch_sizes[0],
in_chans=in_chans,
embed_dim=embed_dims[0],
norm_layer=norm_layer if stem_norm else None,
flatten=False,
output_fmt='NHWC',
)
assert len(layers) == len(patch_sizes) == len(embed_dims) == len(hidden_sizes) == len(mlp_ratios)
reductions = list(accumulate(patch_sizes, lambda x, y: x * y))
stages = []
prev_dim = embed_dims[0]
for i, _ in enumerate(embed_dims):
stages += [Sequencer2dStage(
prev_dim,
embed_dims[i],
depth=layers[i],
downsample=i > 0,
patch_size=patch_sizes[i],
hidden_size=hidden_sizes[i],
mlp_ratio=mlp_ratios[i],
block_layer=block_layer,
rnn_layer=rnn_layer,
mlp_layer=mlp_layer,
norm_layer=norm_layer,
act_layer=act_layer,
num_layers=num_rnn_layers,
bidirectional=bidirectional,
union=union,
with_fc=with_fc,
drop=drop_rate,
drop_path=drop_path_rate,
)]
prev_dim = embed_dims[i]
self.feature_info += [dict(num_chs=prev_dim, reduction=reductions[i], module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.norm = norm_layer(embed_dims[-1])
self.head = ClassifierHead(
self.num_features,
num_classes,
pool_type=global_pool,
drop_rate=drop_rate,
input_fmt=self.output_fmt,
)
self.init_weights(nlhb=nlhb)
def init_weights(self, nlhb=False):
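        # nlhb = "negative log head bias": start the classifier bias at -log(num_classes),
        # i.e. the log-probability of a uniform class prior.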
head_bias = -math.log(self.num_classes) if nlhb else 0.
named_apply(partial(_init_weights, head_bias=head_bias), module=self) # depth-first
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=[
(r'^stages\.(\d+)', None),
(r'^norm', (99999,))
] if coarse else [
(r'^stages\.(\d+)\.blocks\.(\d+)', None),
(r'^stages\.(\d+)\.downsample', (0,)),
(r'^norm', (99999,))
]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
assert not enable, 'gradient checkpointing not supported'
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head.reset(num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.stem(x)
x = self.stages(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=True) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def checkpoint_filter_fn(state_dict, model):
""" Remap original checkpoints -> timm """
if 'stages.0.blocks.0.norm1.weight' in state_dict:
return state_dict # already translated checkpoint
if 'model' in state_dict:
state_dict = state_dict['model']
import re
out_dict = {}
for k, v in state_dict.items():
k = re.sub(r'blocks.([0-9]+).([0-9]+).down', lambda x: f'stages.{int(x.group(1)) + 1}.downsample.down', k)
k = re.sub(r'blocks.([0-9]+).([0-9]+)', r'stages.\1.blocks.\2', k)
k = k.replace('head.', 'head.fc.')
out_dict[k] = v
return out_dict
def _create_sequencer2d(variant, pretrained=False, **kwargs):
default_out_indices = tuple(range(3))
out_indices = kwargs.pop('out_indices', default_out_indices)
model = build_model_with_cfg(
Sequencer2d,
variant,
pretrained,
pretrained_filter_fn=checkpoint_filter_fn,
feature_cfg=dict(flatten_sequential=True, out_indices=out_indices),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': DEFAULT_CROP_PCT, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.proj', 'classifier': 'head.fc',
**kwargs
}
default_cfgs = generate_default_cfgs({
'sequencer2d_s.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_m.in1k': _cfg(hf_hub_id='timm/'),
'sequencer2d_l.in1k': _cfg(hf_hub_id='timm/'),
})
@register_model
def sequencer2d_s(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 8, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
)
model = _create_sequencer2d('sequencer2d_s', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_m(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[4, 3, 14, 3],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
**kwargs)
model = _create_sequencer2d('sequencer2d_m', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def sequencer2d_l(pretrained=False, **kwargs) -> Sequencer2d:
model_args = dict(
layers=[8, 8, 16, 4],
patch_sizes=[7, 2, 1, 1],
embed_dims=[192, 384, 384, 384],
hidden_sizes=[48, 96, 96, 96],
mlp_ratios=[3.0, 3.0, 3.0, 3.0],
rnn_layer=LSTM2d,
bidirectional=True,
union="cat",
with_fc=True,
**kwargs)
model = _create_sequencer2d('sequencer2d_l', pretrained=pretrained, **dict(model_args, **kwargs))
return model
| pytorch-image-models/timm/models/sequencer.py/0 | {
"file_path": "pytorch-image-models/timm/models/sequencer.py",
"repo_id": "pytorch-image-models",
"token_count": 9227
} | 371 |
""" VoVNet (V1 & V2)
Papers:
* `An Energy and GPU-Computation Efficient Backbone Network` - https://arxiv.org/abs/1904.09730
* `CenterMask : Real-Time Anchor-Free Instance Segmentation` - https://arxiv.org/abs/1911.06667
Looked at https://github.com/youngwanLEE/vovnet-detectron2 &
https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
for some reference, rewrote most of the code.
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import List
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ConvNormAct, SeparableConvNormAct, BatchNormAct2d, ClassifierHead, DropPath, \
create_attn, create_norm_act_layer
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import register_model, generate_default_cfgs
__all__ = ['VovNet'] # model_registry will add each entrypoint fn to this
class SequentialAppendList(nn.Sequential):
def __init__(self, *args):
super(SequentialAppendList, self).__init__(*args)
def forward(self, x: torch.Tensor, concat_list: List[torch.Tensor]) -> torch.Tensor:
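        # One-Shot Aggregation (OSA): run the conv sequence while collecting each
        # intermediate output (the block input is pre-filled by the caller), then
        # concatenate everything channel-wise for the aggregation conv that follows.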
for i, module in enumerate(self):
if i == 0:
concat_list.append(module(x))
else:
concat_list.append(module(concat_list[-1]))
x = torch.cat(concat_list, dim=1)
return x
class OsaBlock(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
layer_per_block,
residual=False,
depthwise=False,
attn='',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path=None,
):
super(OsaBlock, self).__init__()
self.residual = residual
self.depthwise = depthwise
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
next_in_chs = in_chs
if self.depthwise and next_in_chs != mid_chs:
assert not residual
self.conv_reduction = ConvNormAct(next_in_chs, mid_chs, 1, **conv_kwargs)
else:
self.conv_reduction = None
mid_convs = []
for i in range(layer_per_block):
if self.depthwise:
conv = SeparableConvNormAct(mid_chs, mid_chs, **conv_kwargs)
else:
conv = ConvNormAct(next_in_chs, mid_chs, 3, **conv_kwargs)
next_in_chs = mid_chs
mid_convs.append(conv)
self.conv_mid = SequentialAppendList(*mid_convs)
# feature aggregation
next_in_chs = in_chs + layer_per_block * mid_chs
self.conv_concat = ConvNormAct(next_in_chs, out_chs, **conv_kwargs)
self.attn = create_attn(attn, out_chs) if attn else None
self.drop_path = drop_path
def forward(self, x):
output = [x]
if self.conv_reduction is not None:
x = self.conv_reduction(x)
x = self.conv_mid(x, output)
x = self.conv_concat(x)
if self.attn is not None:
x = self.attn(x)
if self.drop_path is not None:
x = self.drop_path(x)
if self.residual:
x = x + output[0]
return x
class OsaStage(nn.Module):
def __init__(
self,
in_chs,
mid_chs,
out_chs,
block_per_stage,
layer_per_block,
downsample=True,
residual=True,
depthwise=False,
attn='ese',
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_path_rates=None,
):
super(OsaStage, self).__init__()
self.grad_checkpointing = False
if downsample:
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True)
else:
self.pool = None
blocks = []
for i in range(block_per_stage):
last_block = i == block_per_stage - 1
if drop_path_rates is not None and drop_path_rates[i] > 0.:
drop_path = DropPath(drop_path_rates[i])
else:
drop_path = None
blocks += [OsaBlock(
in_chs, mid_chs, out_chs, layer_per_block, residual=residual and i > 0, depthwise=depthwise,
attn=attn if last_block else '', norm_layer=norm_layer, act_layer=act_layer, drop_path=drop_path)
]
in_chs = out_chs
self.blocks = nn.Sequential(*blocks)
def forward(self, x):
if self.pool is not None:
x = self.pool(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
return x
class VovNet(nn.Module):
def __init__(
self,
cfg,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
norm_layer=BatchNormAct2d,
act_layer=nn.ReLU,
drop_rate=0.,
drop_path_rate=0.,
**kwargs,
):
"""
Args:
cfg (dict): Model architecture configuration
in_chans (int): Number of input channels (default: 3)
num_classes (int): Number of classifier classes (default: 1000)
global_pool (str): Global pooling type (default: 'avg')
output_stride (int): Output stride of network, one of (8, 16, 32) (default: 32)
norm_layer (Union[str, nn.Module]): normalization layer
act_layer (Union[str, nn.Module]): activation layer
drop_rate (float): Dropout rate (default: 0.)
drop_path_rate (float): Stochastic depth drop-path rate (default: 0.)
kwargs (dict): Extra kwargs overlayed onto cfg
"""
super(VovNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
assert output_stride == 32 # FIXME support dilation
cfg = dict(cfg, **kwargs)
stem_stride = cfg.get("stem_stride", 4)
stem_chs = cfg["stem_chs"]
stage_conv_chs = cfg["stage_conv_chs"]
stage_out_chs = cfg["stage_out_chs"]
block_per_stage = cfg["block_per_stage"]
layer_per_block = cfg["layer_per_block"]
conv_kwargs = dict(norm_layer=norm_layer, act_layer=act_layer)
# Stem module
last_stem_stride = stem_stride // 2
conv_type = SeparableConvNormAct if cfg["depthwise"] else ConvNormAct
self.stem = nn.Sequential(*[
ConvNormAct(in_chans, stem_chs[0], 3, stride=2, **conv_kwargs),
conv_type(stem_chs[0], stem_chs[1], 3, stride=1, **conv_kwargs),
conv_type(stem_chs[1], stem_chs[2], 3, stride=last_stem_stride, **conv_kwargs),
])
self.feature_info = [dict(
num_chs=stem_chs[1], reduction=2, module=f'stem.{1 if stem_stride == 4 else 2}')]
current_stride = stem_stride
# OSA stages
stage_dpr = torch.split(torch.linspace(0, drop_path_rate, sum(block_per_stage)), block_per_stage)
in_ch_list = stem_chs[-1:] + stage_out_chs[:-1]
stage_args = dict(residual=cfg["residual"], depthwise=cfg["depthwise"], attn=cfg["attn"], **conv_kwargs)
stages = []
for i in range(4): # num_stages
downsample = stem_stride == 2 or i > 0 # first stage has no stride/downsample if stem_stride is 4
stages += [OsaStage(
in_ch_list[i],
stage_conv_chs[i],
stage_out_chs[i],
block_per_stage[i],
layer_per_block,
downsample=downsample,
drop_path_rates=stage_dpr[i],
**stage_args,
)]
self.num_features = stage_out_chs[i]
current_stride *= 2 if downsample else 1
self.feature_info += [dict(num_chs=self.num_features, reduction=current_stride, module=f'stages.{i}')]
self.stages = nn.Sequential(*stages)
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
for n, m in self.named_modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.Linear):
nn.init.zeros_(m.bias)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^stem',
blocks=r'^stages\.(\d+)' if coarse else r'^stages\.(\d+).blocks\.(\d+)',
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
return self.stages(x)
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
# model cfgs adapted from https://github.com/youngwanLEE/vovnet-detectron2 &
# https://github.com/stigma0617/VoVNet.pytorch/blob/master/models_vovnet/vovnet.py
model_cfgs = dict(
vovnet39a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=False,
depthwise=False,
attn='',
),
vovnet57a=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=False,
depthwise=False,
attn='',
),
ese_vovnet19b_slim_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_dw=dict(
stem_chs=[64, 64, 64],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=True,
attn='ese',
),
ese_vovnet19b_slim=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[64, 80, 96, 112],
stage_out_chs=[112, 256, 384, 512],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet19b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=3,
block_per_stage=[1, 1, 1, 1],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet57b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 4, 3],
residual=True,
depthwise=False,
attn='ese',
),
ese_vovnet99b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 3, 9, 3],
residual=True,
depthwise=False,
attn='ese',
),
eca_vovnet39b=dict(
stem_chs=[64, 64, 128],
stage_conv_chs=[128, 160, 192, 224],
stage_out_chs=[256, 512, 768, 1024],
layer_per_block=5,
block_per_stage=[1, 1, 2, 2],
residual=True,
depthwise=False,
attn='eca',
),
)
model_cfgs['ese_vovnet39b_evos'] = model_cfgs['ese_vovnet39b']
def _create_vovnet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
VovNet,
variant,
pretrained,
model_cfg=model_cfgs[variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.0.conv', 'classifier': 'head.fc', **kwargs,
}
default_cfgs = generate_default_cfgs({
'vovnet39a.untrained': _cfg(url=''),
'vovnet57a.untrained': _cfg(url=''),
'ese_vovnet19b_slim_dw.untrained': _cfg(url=''),
'ese_vovnet19b_dw.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet19b_slim.untrained': _cfg(url=''),
'ese_vovnet39b.ra_in1k': _cfg(
hf_hub_id='timm/',
test_input_size=(3, 288, 288), test_crop_pct=0.95),
'ese_vovnet57b.untrained': _cfg(url=''),
'ese_vovnet99b.untrained': _cfg(url=''),
'eca_vovnet39b.untrained': _cfg(url=''),
'ese_vovnet39b_evos.untrained': _cfg(url=''),
})
@register_model
def vovnet39a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet39a', pretrained=pretrained, **kwargs)
@register_model
def vovnet57a(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('vovnet57a', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_dw(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_dw', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet19b_slim(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet19b_slim', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet39b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet57b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet57b', pretrained=pretrained, **kwargs)
@register_model
def ese_vovnet99b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('ese_vovnet99b', pretrained=pretrained, **kwargs)
@register_model
def eca_vovnet39b(pretrained=False, **kwargs) -> VovNet:
return _create_vovnet('eca_vovnet39b', pretrained=pretrained, **kwargs)
# Experimental Models
@register_model
def ese_vovnet39b_evos(pretrained=False, **kwargs) -> VovNet:
def norm_act_fn(num_features, **nkwargs):
return create_norm_act_layer('evonorms0', num_features, jit=False, **nkwargs)
return _create_vovnet('ese_vovnet39b_evos', pretrained=pretrained, norm_layer=norm_act_fn, **kwargs)
| pytorch-image-models/timm/models/vovnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/vovnet.py",
"repo_id": "pytorch-image-models",
"token_count": 7781
} | 372 |
import math
import torch
from torch.optim.optimizer import Optimizer
class Nadam(Optimizer):
"""Implements Nadam algorithm (a variant of Adam based on Nesterov momentum).
It has been proposed in `Incorporating Nesterov Momentum into Adam`__.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
schedule_decay (float, optional): momentum schedule decay (default: 4e-3)
__ http://cs229.stanford.edu/proj2015/054_report.pdf
__ http://www.cs.toronto.edu/~fritz/absps/momentum.pdf
Originally taken from: https://github.com/pytorch/pytorch/pull/1408
NOTE: Has potential issues but does work well on some problems.
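    Example (standard optimizer loop; `model`, `loss_fn`, `inputs`, `targets` are user code)::

        optimizer = Nadam(model.parameters(), lr=2e-3)
        optimizer.zero_grad()
        loss_fn(model(inputs), targets).backward()
        optimizer.step()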
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, schedule_decay=4e-3):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
defaults = dict(
lr=lr,
betas=betas,
eps=eps,
weight_decay=weight_decay,
schedule_decay=schedule_decay,
)
super(Nadam, self).__init__(params, defaults)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['m_schedule'] = 1.
state['exp_avg'] = torch.zeros_like(p)
state['exp_avg_sq'] = torch.zeros_like(p)
# Warming momentum schedule
m_schedule = state['m_schedule']
schedule_decay = group['schedule_decay']
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
t = state['step']
bias_correction2 = 1 - beta2 ** t
if group['weight_decay'] != 0:
grad = grad.add(p, alpha=group['weight_decay'])
momentum_cache_t = beta1 * (1. - 0.5 * (0.96 ** (t * schedule_decay)))
momentum_cache_t_1 = beta1 * (1. - 0.5 * (0.96 ** ((t + 1) * schedule_decay)))
m_schedule_new = m_schedule * momentum_cache_t
m_schedule_next = m_schedule * momentum_cache_t * momentum_cache_t_1
state['m_schedule'] = m_schedule_new
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1. - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1. - beta2)
denom = (exp_avg_sq.sqrt() / math.sqrt(bias_correction2)).add_(eps)
p.addcdiv_(grad, denom, value=-group['lr'] * (1. - momentum_cache_t) / (1. - m_schedule_new))
p.addcdiv_(exp_avg, denom, value=-group['lr'] * momentum_cache_t_1 / (1. - m_schedule_next))
return loss
| pytorch-image-models/timm/optim/nadam.py/0 | {
"file_path": "pytorch-image-models/timm/optim/nadam.py",
"repo_id": "pytorch-image-models",
"token_count": 1921
} | 373 |
""" TanH Scheduler
TanH schedule with warmup, cycle/restarts, noise.
Hacked together by / Copyright 2021 Ross Wightman
"""
import logging
import math
import numpy as np
import torch
from typing import List
from .scheduler import Scheduler
_logger = logging.getLogger(__name__)
class TanhLRScheduler(Scheduler):
"""
    Hyperbolic-Tangent decay with restarts.
This is described in the paper https://arxiv.org/abs/1806.01593
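    Ignoring warmup and noise, with tr = t_curr / t_i inside a cycle:

        lr = lr_min + 0.5 * (lr_max - lr_min) * (1 - tanh(lb * (1 - tr) + ub * tr))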
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
t_initial: int,
lb: float = -7.,
ub: float = 3.,
lr_min: float = 0.,
cycle_mul: float = 1.,
cycle_decay: float = 1.,
cycle_limit: int = 1,
warmup_t=0,
warmup_lr_init=0,
warmup_prefix=False,
t_in_epochs=True,
noise_range_t=None,
noise_pct=0.67,
noise_std=1.0,
noise_seed=42,
initialize=True,
) -> None:
super().__init__(
optimizer,
param_group_field="lr",
t_in_epochs=t_in_epochs,
noise_range_t=noise_range_t,
noise_pct=noise_pct,
noise_std=noise_std,
noise_seed=noise_seed,
initialize=initialize,
)
assert t_initial > 0
assert lr_min >= 0
assert lb < ub
assert cycle_limit >= 0
assert warmup_t >= 0
assert warmup_lr_init >= 0
self.lb = lb
self.ub = ub
self.t_initial = t_initial
self.lr_min = lr_min
self.cycle_mul = cycle_mul
self.cycle_decay = cycle_decay
self.cycle_limit = cycle_limit
self.warmup_t = warmup_t
self.warmup_lr_init = warmup_lr_init
self.warmup_prefix = warmup_prefix
if self.warmup_t:
t_v = self.base_values if self.warmup_prefix else self._get_lr(self.warmup_t)
self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in t_v]
super().update_groups(self.warmup_lr_init)
else:
self.warmup_steps = [1 for _ in self.base_values]
def _get_lr(self, t: int) -> List[float]:
if t < self.warmup_t:
lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps]
else:
if self.warmup_prefix:
t = t - self.warmup_t
if self.cycle_mul != 1:
i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul))
t_i = self.cycle_mul ** i * self.t_initial
t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial
else:
i = t // self.t_initial
t_i = self.t_initial
t_curr = t - (self.t_initial * i)
if i < self.cycle_limit:
gamma = self.cycle_decay ** i
lr_max_values = [v * gamma for v in self.base_values]
tr = t_curr / t_i
lrs = [
self.lr_min + 0.5 * (lr_max - self.lr_min) * (1 - math.tanh(self.lb * (1. - tr) + self.ub * tr))
for lr_max in lr_max_values
]
else:
lrs = [self.lr_min for _ in self.base_values]
return lrs
def get_cycle_length(self, cycles=0):
cycles = max(1, cycles or self.cycle_limit)
if self.cycle_mul == 1.0:
return self.t_initial * cycles
else:
return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
| pytorch-image-models/timm/scheduler/tanh_lr.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/tanh_lr.py",
"repo_id": "pytorch-image-models",
"token_count": 1972
} | 374 |
import random
import numpy as np
import torch
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
| pytorch-image-models/timm/utils/random.py/0 | {
"file_path": "pytorch-image-models/timm/utils/random.py",
"repo_id": "pytorch-image-models",
"token_count": 68
} | 375 |
import paddle
weights = paddle.load("paddle_ace.pdparams")[0]
assert list(weights.keys()) == ["weight"]
assert paddle.allclose(weights["weight"], paddle.zeros((2, 2)))
print("The file looks fine !")
| safetensors/attacks/paddle_ace_get_pwned.py/0 | {
"file_path": "safetensors/attacks/paddle_ace_get_pwned.py",
"repo_id": "safetensors",
"token_count": 64
} | 376 |
## Installation
```
pip install safetensors
```
## Usage
### Numpy
```python
from safetensors.numpy import save_file, load_file
import numpy as np
tensors = {
"a": np.zeros((2, 2)),
"b": np.zeros((2, 3), dtype=np.uint8)
}
save_file(tensors, "./model.safetensors")
# Now loading
loaded = load_file("./model.safetensors")
```
### Torch
```python
from safetensors.torch import save_file, load_file
import torch
tensors = {
"a": torch.zeros((2, 2)),
"b": torch.zeros((2, 3), dtype=torch.uint8)
}
save_file(tensors, "./model.safetensors")
# Now loading
loaded = load_file("./model.safetensors")
```
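
### Lazy loading

If you only need some of the tensors (e.g. a single layer), `safe_open` lets you read them individually without loading the whole file. A minimal sketch, reusing the `model.safetensors` file saved in the example above:

```python
from safetensors import safe_open

tensors = {}
with safe_open("./model.safetensors", framework="pt", device="cpu") as f:
    for key in f.keys():
        tensors[key] = f.get_tensor(key)
```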
### Developing
```
# inside ./safetensors/bindings/python
pip install .[dev]
```
Should be enough to install this library locally.
### Testing
```
# inside ./safetensors/bindings/python
pip install .[dev]
pytest -sv tests/
```
| safetensors/bindings/python/README.md/0 | {
"file_path": "safetensors/bindings/python/README.md",
"repo_id": "safetensors",
"token_count": 347
} | 377 |
import os
from typing import Dict, Optional, Union
import numpy as np
import tensorflow as tf
from safetensors import numpy, safe_open
def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes:
"""
Saves a dictionary of tensors into raw bytes in safetensors format.
Args:
tensors (`Dict[str, tf.Tensor]`):
The incoming tensors. Tensors need to be contiguous and dense.
metadata (`Dict[str, str]`, *optional*, defaults to `None`):
Optional text only metadata you might want to save in your header.
For instance it can be useful to specify more about the underlying
tensors. This is purely informative and does not affect tensor loading.
Returns:
`bytes`: The raw bytes representing the format
Example:
```python
from safetensors.tensorflow import save
import tensorflow as tf
tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
byte_data = save(tensors)
```
"""
np_tensors = _tf2np(tensors)
return numpy.save(np_tensors, metadata=metadata)
def save_file(
tensors: Dict[str, tf.Tensor],
filename: Union[str, os.PathLike],
metadata: Optional[Dict[str, str]] = None,
) -> None:
"""
    Saves a dictionary of tensors into a file in safetensors format.
Args:
tensors (`Dict[str, tf.Tensor]`):
The incoming tensors. Tensors need to be contiguous and dense.
filename (`str`, or `os.PathLike`)):
The filename we're saving into.
metadata (`Dict[str, str]`, *optional*, defaults to `None`):
Optional text only metadata you might want to save in your header.
For instance it can be useful to specify more about the underlying
tensors. This is purely informative and does not affect tensor loading.
Returns:
`None`
Example:
```python
from safetensors.tensorflow import save_file
import tensorflow as tf
tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))}
save_file(tensors, "model.safetensors")
```
"""
np_tensors = _tf2np(tensors)
return numpy.save_file(np_tensors, filename, metadata=metadata)
def load(data: bytes) -> Dict[str, tf.Tensor]:
"""
Loads a safetensors file into tensorflow format from pure bytes.
Args:
data (`bytes`):
The content of a safetensors file
Returns:
`Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` on cpu
Example:
```python
from safetensors.tensorflow import load
file_path = "./my_folder/bert.safetensors"
with open(file_path, "rb") as f:
data = f.read()
loaded = load(data)
```
"""
flat = numpy.load(data)
return _np2tf(flat)
def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]:
"""
Loads a safetensors file into tensorflow format.
Args:
filename (`str`, or `os.PathLike`)):
The name of the file which contains the tensors
Returns:
`Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor`
Example:
```python
from safetensors.tensorflow import load_file
file_path = "./my_folder/bert.safetensors"
loaded = load_file(file_path)
```
"""
result = {}
with safe_open(filename, framework="tf") as f:
for k in f.keys():
result[k] = f.get_tensor(k)
return result
def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]:
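    # Note: mutates the input dict in place and returns the same dict object.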
for k, v in numpy_dict.items():
numpy_dict[k] = tf.convert_to_tensor(v)
return numpy_dict
def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.ndarray]:
for k, v in tf_dict.items():
tf_dict[k] = v.numpy()
return tf_dict
| safetensors/bindings/python/py_src/safetensors/tensorflow.py/0 | {
"file_path": "safetensors/bindings/python/py_src/safetensors/tensorflow.py",
"repo_id": "safetensors",
"token_count": 1592
} | 378 |
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"title": "safetensors format header",
"description": "Describes the structure of all the tensors and their metadata",
"$defs": {
"size_t": {
"type": "integer",
"minimum": 0,
"maximum": 281474976710655,
"description": "A natural integer no more than 48 bits (current CPU limitation, not all 64 bits are used)"
},
"Tensor": {
"title": "Tensor",
"description": "Describes the structure of one tensor",
"type": "object",
"additionalProperties": false,
"properties": {
"dtype": {
"type": "string",
"pattern": "([UIF])(8|16|32|64|128|256)",
"description": "Type of the array. U - unsigned int, I - signed int, F - IEEE 754 floating-point. Number is the count of bits."
},
"shape": {
"type": "array",
"items": {
"$ref": "#/$defs/size_t",
"description": "Size of each dimension."
}
},
"data_offsets": {
"type": "array",
"prefixItems": [
{
"$ref": "#/$defs/size_t",
"description": "Start offset of the array. "
},
{
"$ref": "#/$defs/size_t",
"description": "End offset of the array. Equal to the previous item + array size."
}
]
}
},
"required": [
"data_offsets",
"dtype",
"shape"
]
},
"Metadata": {
"type": "object",
"additionalProperties": {"type": "string"},
"title": "Metadata"
}
},
"type": "object",
"properties": {
"__metadata__": {
"description": "Arbitrary metadata",
"$ref": "#/$defs/Metadata"
}
},
"additionalProperties": {
"$ref": "#/$defs/Tensor"
}
}
| safetensors/docs/safetensors.schema.json/0 | {
"file_path": "safetensors/docs/safetensors.schema.json",
"repo_id": "safetensors",
"token_count": 742
} | 379 |
# docstyle-ignore
INSTALL_CONTENT = """
# SetFit installation
! pip install setfit
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/setfit.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
| setfit/docs/source/_config.py/0 | {
"file_path": "setfit/docs/source/_config.py",
"repo_id": "setfit",
"token_count": 97
} | 380 |
# Installation
Before you start, you'll need to set up your environment and install the appropriate packages. 🤗 SetFit is tested on **Python 3.7+**.
## pip
The most straightforward way to install 🤗 SetFit is with pip:
```bash
pip install setfit
```
If you have a CUDA-capable graphics card, then it is recommended to [install `torch` with CUDA support](https://pytorch.org/get-started/locally/) to train and perform inference much more quickly:
```bash
pip install torch --index-url https://download.pytorch.org/whl/cu118
```
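
You can verify that the CUDA build is active with a quick check (assumes `torch` was installed as above):

```python
import torch

print(torch.cuda.is_available())  # True means PyTorch can see your GPU
```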
## Installing from source
Building 🤗 SetFit from source lets you make changes to the code base. To install from source, clone the repository and install 🤗 SetFit in [editable mode](https://setuptools.pypa.io/en/latest/userguide/development_mode.html) with the following commands:
```bash
git clone https://github.com/huggingface/setfit.git
cd setfit
pip install -e .
```
If you just want the bleeding-edge version without making any changes of your own, then install from source by running:
```bash
pip install git+https://github.com/huggingface/setfit.git
```
## Conda
If conda is your package management system of choice, then you can install 🤗 SetFit like so:
```bash
conda install -c conda-forge setfit
```
| setfit/docs/source/en/installation.mdx/0 | {
"file_path": "setfit/docs/source/en/installation.mdx",
"repo_id": "setfit",
"token_count": 380
} | 381 |
<jupyter_start><jupyter_text>**SetFitABSA vs. BloombergGPT** **BloombergGPT** In their paper [BloombergGPT: A Large Language Model for Finance](https://arxiv.org/pdf/2303.17564.pdf), Bloomberg presents the development of **BloombergGPT**, LLM which is specialized for the financial domain.The model is a **50B** parameters language model, trained on a wide range of financial data:* **363 billion** tokens dataset based on Bloomberg's extensive data sources* **345 billion** tokens from general purpose datasetsThe model was evaluated on various financial NLP tasks, including Aspect-Based Sentiment Analysis (**ABSA**). Bloomberg evaluated ABSA in an in-context manner using the [FiQA_SA](https://huggingface.co/datasets/AdaptLLM/finance-tasks/viewer/FiQA_SA). This dataset contains a test set of 235 finance related sentences, each is prefixed by 5 tagged sentences, which overall comprise an input prompt to the model. Every tagged sentence in the prompt contains a known aspect and a question whether its corresponding polarity is Positive, Negative or Neutral, and the answer (the correct polarity towards the mentioned aspect) is written at the end. BloombergGPT is expected to predict the polarity of a given aspect in the test sentence based on the 5 tagged example sentences. This is an **SB2 ABSA task**, which means that the aspect is already given and the model just needs to predict the corresponding polarity.The evaluation score achieved by this model is **weighted F1-score=75.07**, as reported in table 8 in the paper. ![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAbEAAACnCAYAAACfKGKDAAAgAElEQVR4AeydB1RUSdbHN3y7O7uT8+zkGcdxdByzjjnnnCPmgBkxJ0AMqCgoGYkqQRFQUQkSBARzAJSoIqBijgShoR+/77zupukG2jTBdn19Tp9+r15VvVv/ulX/ureqq/6C9JEQkBCQEJAQkBB4RRH4yysqtyS2hICEgISAhICEABKJSUogISAhICEgIfDKIiCR2CtbdZLgEgISAhICEgJaJGZvb88PP/wgfSUMJB2QdEDSAUkHXroOODg4PJWltUjsxo0bnDlzRvpKGEg6IOmApAOSDrx0HRA56WkfLRJ7WmTpuYSAhICEgISAhIA+ISCRmD7VhiSLhICEgISAhMBzISCR2HPBJUWWEJAQkBCQENAnBCQS06fakGSREJAQkBCQEHguBCQSey649CGywN3MBM6ePs3p8u/Zc2TeKECuD+K9RBnKZLdJPrIPb09PAiLSyBMErqemcleAMuE+FxPOVmB2+ixJqTk8lGkKLOPBzWtcuXKl2m/ujQeUaEZ/4WsZN9Lj2OPtwfZdcVwpFHiQmsxVuShnHpfPact5PuM6BerKFSi4q0vGa1y/V1hFqtIHWSSdPU1CwiXuq/PJ52paKmnpaaSmXaOgSqonBSjTpmdkkJGeRs7dUgW+l88nk5qWRmrGdR4/KfkzPhMKczkTFURg4H7izt2gWJ1O4GH2Oa02kJCUwc18deEoEwq5c636erxyLZf7VWFS5/7HX1SV/2xCMtl3ixFULy8THpKTlkaGiHFGOukZuTrqqJAriVEEBQYSHH2eW0XVS59/PYHIoEB2748h9bpmpDyyzp/l9NkELt3UagyKjITC66ScPc2ZsynceKmYVV8uMVQiMd3Y6OkTgRtpMbhOqkPNjmuIio8nPi6SnZsM6d1jHiFZFQ1ZTwvwB4glcD1+A6P7T8ExNJNCQc7djBAcLabSueVCzsigrOw252M9MKj1HgNXRBAfH8ehEF8spg7EcG0s9wWQl8Qwq0NvzBx98A/0w7TPF/zYwZSdgf54u5gxsNU0oit60hcqh7wgAduJA5hpGUZ2oUDh7ZN4WxrTo74BwYVKsk0/FsiMJm/Tfd5+4uIPE7nfjcUjB7DU5yKysmtYDW7HbKtt+AXsYEnXT/i512r8AnbitmE8nYd6VpGr9H46sXsW0v6zL+hvfhaxqxJJ/XykHSO7GuAansbD8t6zSuqqAWXCAzKOBrK8xxd8VnsaEVkyyspyCZ3fgTbD1xMQc1FHh1s1r+pDBG4et2KCwQrCM0XACzkfuJxxE+1JyhNTCNy5dBRf46Z83XghwXHxxB7ah/PykYyYuYMsGRRft2dQa0Nstvmxa6cpHT/6nBGr/PDf6cmaCS2Z6fag+lf/KaFK+XcYN+WbZksIi4snLjYUD7Mh9B7uRIpCD+6REr+TyfXfpP3UHcQczySvUh2VPDjKuvGjsQy6rKjTRxcCWGwwGY9TCpAUJRHrOcZqApNN93OlGITHGexYPpo5tqcVdaSoy+N7mdPrKxp128INrXfIydgznh//0xaLA6e4mv+ngPPcL5FI7Lkh04cEAmmbO9J8zAGN0Wkhu6d8x69jQxQK/fhuNpcyL3P50iVuVtZ+oORRFiciQ4hNuEaBluKK5ZNzL/sUkSGHOH81v2J0KLvL5dRULuRWNBIxdpn8AdlpqaTnPKDgbhapKSmkiN/UDK49qBjdFT/IJevyZS5rfLOv3UdGMTcvp6rSpJF1PY/SKjAL5OUmER0azqnMh5Tcv84t1ciw6LILvX7sy45KBC676c2wNkoSU8gpXGJ1m6+YG1gxEi19tIP+H/7IyhgZctk+XF0uqSxaGdGL6tHGMEqBp9hxZm9zIvhRFcGeOaBMuMuuST8zYEWSlkUnf3yMeS1GKUhMmVkezn0/ZLJ7hX2Ud3Ixdd8diP+tRNydo1X1XkzwrFp0Nj6myi+f
**SetFitABSA**

[SetFitABSA](https://huggingface.co/blog/setfit-absa) is a framework for **few-shot** training of domain-specific ABSA models. It provides an efficient and accurate technique for detecting sentiment towards specific aspects within a text. The graph below shows evaluation results for a SetFitABSA model trained with just a small number (k) of finance-related sentences and tested on the same 235 FiQA_SA test sentences that were used to evaluate BloombergGPT. The model's size is determined by its underlying sentence transformer and is usually considerably smaller than SOTA LLMs; specifically, in this work we used the **paraphrase-mpnet-base-v2** sentence transformer, which contains only **110M** parameters compared to BloombergGPT's **50B**.

We separated each prompt in [FiQA_SA](https://huggingface.co/datasets/AdaptLLM/finance-tasks/viewer/FiQA_SA) into its tagged and non-tagged sentences to create the [train and test sets](https://huggingface.co/datasets/ronenlap/SetFitAbsa_FiQA), respectively. The train set contains 646 unique sentences overall.

The graph presents the weighted F1-score achieved by SetFitABSA as a function of k, the number of sampled training sentences; the result for each k is averaged over 5 seeds. We can see that with only 50 training sentences, SetFitABSA exceeds the BloombergGPT score. Note that if we use the full training set, the SetFitABSA model reaches a weighted F1-score of over 86.

*Figure (SetFitABSA_vs_BloombergGPT.png): weighted F1-score of SetFitABSA on the FiQA_SA test set as a function of k, compared with the BloombergGPT result.*
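The sketch below shows one way this sampled-k evaluation could be run with the `datasets`, `setfit`, and `scikit-learn` libraries. It is a minimal sketch, not the exact pipeline used here: the split and column names (`train`, `test`, `text`, `label`) and the training hyperparameters are assumptions, and it trains a plain SetFit polarity classifier rather than the full SetFitABSA aspect-extraction pipeline, since in the SB2 setting the aspects are already given.

```python
# Minimal sketch of the k-shot evaluation loop (assumed split/column names and
# hyperparameters; not the exact pipeline used in this notebook).
import numpy as np
from datasets import load_dataset
from setfit import SetFitModel, Trainer, TrainingArguments
from sklearn.metrics import f1_score

dataset = load_dataset("ronenlap/SetFitAbsa_FiQA")  # split names assumed below
test_ds = dataset["test"]

def run_one_seed(k: int, seed: int) -> float:
    # Sample k training sentences for this seed.
    train_ds = dataset["train"].shuffle(seed=seed).select(range(k))
    # Polarity classifier on top of the 110M-parameter sentence transformer.
    model = SetFitModel.from_pretrained("sentence-transformers/paraphrase-mpnet-base-v2")
    trainer = Trainer(
        model=model,
        args=TrainingArguments(batch_size=16, num_epochs=1),
        train_dataset=train_ds,
    )
    trainer.train()
    preds = model.predict(test_ds["text"])  # "text"/"label" column names assumed
    return f1_score(test_ds["label"], preds, average="weighted")

k = 50
scores = [run_one_seed(k, seed) for seed in range(5)]
print(f"weighted F1 at k={k}, averaged over 5 seeds: {np.mean(scores):.2f}")
```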
5yXsoDVfqcRwLxRx99ZI5lWU0Ct9xyi/y///f/5M9//rP5bWqvWrZsaYL9Bx98YGqV+B6Wcay/+uqrUhFg2xEdzgnNYXyWdZo0aWKuR0QvXgIQPe4l3qPpSslcVAKUtAEJIEiQcVMqa9GihSlFv/vuuyZoEhgpwdk+AQQ+MkVKoEEQ/P/whz9sU0W/atUqOeigg0yGTyb7zTffmMzXH4jKgrbd//3f/zVVwn369DElWn+g9UNJnqpqmjaAtlsEJKhU7Yd9pT8A1dM0OXAcSF27djXL77zzTtN/wpYGKdXSln7ZZZfJL7/8YraLAEyw8FdX0weC7UcukKpbb73VtH+PGDEitoaY0ioCQoClo1737t1NACJQWelAgBAh0rfffmvEjOaKZs2ameUE5jvuuMMEatsngO166623TBMO67MuIkKTh6W8EpCXl2fOG7/NeaRUjoz4O0oiATTRUKVPUxHXE79FcKRa3koctRxU1Z9yyiny448/yujRo833B8F1eP/995ugzrVEzQZ9DThOLqhdsTUBlOSpCaBZh+uGvghsG+eEWiqu6913391sB3AsOP5cN5wzmqNat25tRMEvAZwfQE7Y7wsvvDCwOUzJLFQClLQBCbj66qtNaYiMmkyPEt7ll1++jQSQOVI6uvnmm02pN1H1Op30KB03btw49o6YTJ5SK6VyoBqWKnpKzOWF7Xz++edN5k/gJXjQMZFAH1+NS/AhwNjMmA5lfA5xcMG+sh7fzTEgcUxs6TVeAp5++mkTOAYNGmReA0EFSWKfE0GV/bHHHlv6vUCgonrdVlcD203tAtsFr776qhGFYcOGmdfxUPqPl4AgkBf2y9YWlFcCCPqcN84fEIw5r/6Od0gAx41jQEDkekIwWe/RRx/dRvyopTjzzDNNGz/rcx1y7giofmhqoSnAf73QK//222+PvUoM1wG1I34S1QhRq0LfFqDmAuEJaq4A2xxADQ9NLQgAMkhtgKKoBChpA8GVTJvSMiUxAhwlT1v6pEp0RyWAzl7+kuZNN91kPkfQ4TtowyagUrLbEWgvJzBSsmV7KdHyW/5aB/aBAEGA5rcotREQKZ0TRF0QbAmyNHXYY8C22oAaJAEEbn9Qof2Y3/r4449j72ztO8A20u+CZhCOB8eUR8wsBCuCiL8DIyVXgielfSDAE1BtJ8V4giSAbevUqZMRKJ4auPvuu03plXNog3l5JIDv4RzydATHhGPL79lza0EC2BeaCbi2OFbIH7/B+WI7/CBm3333nQm+CAO1SNRM+YMp8kHNDtcWwsc5pWYFcU1Ue2DheNGEEw/byffSDMY54ZhxTtg/YB95j9/gPLON/uODBCChnH/2l+an8jRVKZmBSoCSNpBRUyokM4wvUYOVANscQGnO1RxA1eof//hH024LZOZUfR9wwAGmepUSE/+SuZKx86jhzkCwREyOO+4409Swdu1a8z5NAQQ02r4JKvwekkOJnXUTlQLBSkCijoFBEsB++GF/aH+3YkKQIghRyiRAc5wRGI4p7eYWghVNAX4JoDRKdTvV0ECJFsEhgAURJAHIGMfimmuuMYGZ5hSq6inBUsKG8kgAbdzIDVLiP4+cV9rqrVCwbewL+0awtnBtISCc80SBm5oFmqEQAVtLQrD973//a2p/2GZqF/ht+lDstttuZdYmBUkAUsJTCJwnmm9o/uG8cKxpqrGwT4gjT89QK0ANEU8xsC80B/A5riuOCdeFq/ZFySxUApS0wUoAGVpQG7uVANt+TXAnyCfqGNi0aVMTGG3nOjJ1OlURfGgbp/RLqY9SFHLw3nvvmfV2FjrdURtAFTKZM8GDTJnOb/b3SFSBk2EPGTIk9sntsRIQ36nRUh4JoGqY4ELHM6D9nN+lIxyfI6ghUKzD5y0EK4J8kATQ8RAoyRP8OGdB+CWAzpHUZHDs6SBJbQRBinXofEhb+Y5IAO3nyATnjfPHMeX4cl7p5W+lx0oAx9s+UQLsN2JAMKW2JBE8SUHAtyVy+lawrRybRo0alZ5PEk8J0Gzlgm2Jf0SwQ4cORmSpCaDEz3ZSs0ETjV8CAGnkeFMLwTZwL/AZrnXkBDHgMUiOM9ecioACKgFK2rCjEkAzACUyghFt237oeEbHOoKV/S6qdsmEqcKm9EQw4F/avslwqU72B4sgKFFSegt6FJDvpx2fmgBKklRz23EB7O+RCH60Z1Mln4goJADJ4Tv9205HQ3qt+6vGyyMBPG9PsLYdHuPxSwD7TImb7aMTnIXzzbkjgJZXAghsbBulZmTCfx45r9Rg0J4PBEj2BfFguYVto9c8QZNjQd8Af/8HC9cZfQSQO/jkk09MgOWccE3Z80migyk1Ea6e+Fzb1AT5ob8G3+nvy4Ec0pfFSgBSEH8/IA3UPnDds4w+ATwuS4dTagh42oGaF/9+K5mJSoCSNhAUqGKlhJdIAuic5u/JTrCkVEgVNyVvMlVKhWSCdHCzbaP0qCYIkZEHQXCi1G4fP0wEEkANA6XIBg0amAyXgEgVN49/2YBBoGCkOoJDEFT5st2JpIP94vOJJIAAS5CwEkApmCDkBwnguNiSMSVZjh8j5rFdlMJZzu/4mwOQF2QpXgJo6rASwHazDjUfBFm+j+OCnADBms5yVLvzNzUjNInwHTztwDbRnEDb/FlnnbWNBBAAE0kAtTp8xt/PwQ/ngpI1+4qM0UxACf2HH34woxFynvldfsNW8xOA2Qbkgtol1uXxO64HzhF9BZAF+o0kagLhOkM67dMRQXCcONZIGX0j6CzKkwJ0NGQ72Xa2CXHjONl+KlzvtPVzbVGbg8xx3mhK4dgS6JFNpAYQAR6fRfio/Qq6l5TMQSVASRso2dGpiVJpUMZFGz9tvvbRKQuD1VCqJvNkiGEe36Ont22bB6qQCZJ0cAuC4E+Gnyi4+OE7qGImcBAokAsycUpnZMBsOwMDkSkn6meA0PBZeqUHQfCn5O0vIfoh06eWg8fUgOFz2X8/HBc63hFcgOpkgiy1JxwLAhrSRDs1VdoWOgUSzP2CwqAz9GOwHQMBASFY0rbO91ESZXhnQCDYRtr+rUzQrk3tBUH6+OOPN488IgOU1u3TEwRHjmeizp4EaQJkoqYUzg3LWY/gSGAnOHNdkBAIAirSYvudcAwJrqzLMaS2iSBL6dqePySIay9RkEcSuHZJieD6puMix4rE/nNOeGQSYbXbxmsEi0cRgVoKmj2oveBz1CLxGKEVXGoikEC+w8L38nQD63POXf1PlF0blQAloyCzIwCT8TMYCxmkoihKpqISoGQclMSpTaCkq6OlKYqSyagEKMoOQHUy1edBiep+f38ERVGUVEclQFF2ADqF0Z8gKNFeW54+A4qiKKmCSoCi7AB0YuNRs6DEo2Q8XqYoipIuqAQoiqIoSoaiEqAoIUKfAB7d8g9VzKNyPN/N42EMOMRz5TxKVtb8AOkA49TzqJyy4/D4IyML2sc4ecSV0Qft652Fjq88EsusjGXNRrmzcE0zCiHjGFQmPErL45H+x32VHUMlIINgIBVGJePZ7FSHoXUZeCbdhjZlwBnGI
fCP6c8z2jybziBHjNLGwDcMjMO5SBfGjBljhl+Oz2wZifDvf/977FXqMGDAADPaYSo/AspkQIyBYEcjZPpq5iJgvIDywDgHjCMRP38ETVY8/89gVYwmGDY8ZovAMESyf2pmmsIYf4FxJegjwwiZjE/hH2CLcR4YAZIxMvg8fzOKIzLpH6sAEWKMCoSaY8R3NW/efLvmNmap5DsQHmXnUAnIIBiFjpHyGO+cYWtTGeZCZ1CYRGPPpypkUkxEZAfSIWgymQ2PJPI3pTRKZwwtnOrnwA+j/dHxMX76WfpBpOKUtAyUxGQ6iSYwSgWoQWGYYDtJEQEVQSxv4Ga+BSYookTuh2DKwEsMixzFIEBc21zT/rk02AdGf0Q+2K+XXnrJDO9NgLaDGgEdZxlqmbyIwZAYs4Mhnhm3gxK9JSsry8x6yKBIXHt2xEsG3fIXDDhWLGN0TO2Ps3OoBGQI3DiMdsYAOUyiwnChQXBTUcIgEbDsqGnAMhIZC8tYJ1GGxefsOv4Z2uJhmf09+10ESYZOZYx4AozdFheu7eY3dna7gzJR3gvabmCZrYLlu8jUGWmQKYXJPO26LPNvo8W/H4l+348dgZD1+Teo+tf/nfHL7f7a7bbf49823mfEQIavZVwFStf+/fB/J3/b7ba/y2sL6wf9hp/ybC/Y9eK/i/cJLJRGES3/9sZjzyWft98d9LtB++UPOv5tTvRbfIfdXr6H4O2XAHsO4vFvl39fqUFglECGiGYfSfbz/Mv3+eG1/Z6ga8vuI/BvovUozROgEVkLtXYE8p9++qn0GPBbiK9/dEdqCpB7ms34PdZjuxkCmfctfIaJkOw9w3cxsiG1Toze6IffPPHEE50TbimJUQnIEKguo0qaEgIZOhmkP+PkRme4XdqsuRkZopTJW6ztM+wppSvGlafakuFmuekpbcUPuEOm1rhxYzPzHutwg3Kj2gwG+JvJZRhal9oJhorlt5nwhHUZwvV//ud/zE3PeOpUB5IRBMGQrbS3M4wu38Xwtf45AMhoWU4GRDUkk/jwffFDBNPDn3kFGHaV3yeDZex/f6bONvCYIOPn81tM7kLAscPaUmXOcL8Mo5udnW2Ei/2g9oX9sGPlMzws+2vhN6japZmA8fv5DuYboLozEWSUlLKYp4BtQfIYptcOw8v5paqYfed8kshs/UMVd+vWzbxPzQv7zu9S4qJUZs8Xx5ImDkpw9nwwCx2ZN/MLsI8WxqRnezjWlM4IFlT7Uj3PNUQbOMeMfaS06B96mO3t06ePuaYYmpftYP9mzZoVW0PMNcOY9xwrSpysw7XzyiuvlG5v/fr1zeQ5bC9zF9jtDYJAhtxwvbJtDP3LsWQKZf+kQewXMkcAYs4DjhmfAWYaZLhj7hv2i0GouJf8wZNzxfHguuI3GJaY9n+GYLYSwHXFb/ulgOvKTh3MMu5Jji3nmOuafWSCJ44J+2knbGLYZfqc2GuXa44qfIY75vpnFkLGtvAf/xdeeMEcS64ZzgHfyfdwrP15BcM9U8L3w3wPXDs0G7lg21mPuRssfDdNZeRPLtgXji/XqR/uf4amZn+UHUclIAMgMyHDIQBg5lg4GQE3j4XMkKpFxndn3nOsm7HFyVCBDJzxyMl0mMSHm57gQMAkQ6HqEWjLu++++0xGzzjxTF1KhsoUtR07djTbQqINkO8iENL2RwZIiYZMhCpcJlPhMwRhMiEm+PFnRBZKHQQ/ZIIMmt9jm6lqtKUiMjQyDzI0qiNJfIYgb5sbyCSpeqSak32jbZ8gQ6ZtJ/khE2KOAWooyPTZZpZxXOx0xJSSCEwjR440x4ISEpk367AfHHOOJe2gjEVvsesxmQ3Hn+9G1vg3CI4hQoZosT8cW84XgQmZgd69e5sAbY8x8sY+I2U2wNFuToAh86da9eeffzaTBRHsv/76a7MOpWm+g+DOdyAOTD7E+aDdmXNt4ZogKHGsCfKtW7c24kXQJPMmEPIb9EshQLPcwvdyDLhWEUECL5k7589uL9cl54T3mZ+B7+LYIhv2WCG6tCNzXfI93bt3N9sbBNc928f1wfHkGqITJ/cHooboANtixQRB4vqg5EknTwIr/Tu4pkk0m3Bf0K8FuLYI2AQ/aoSYpIhrjQmlaD+3QZ9rnX2zr5kkyX6OviTsOx1KaWfnuqdTHL/NOnQq5FxynJAh9h0x4h7gPHEvIkYEWwoEBHJec+3Y+wSB5J6kyp3PcjyZhAiJ84sj5za+XxFzRjDpEtvJnAU0Vfjl2cL1wbliYC3uaaTPTv1Mm78LjjWiY2e9tHC8OAbkGUG/qbhRCcgAuCkJerYajRuFjM9v1Ng/gcCfKfshcHHjc6MTcCx8joBBxkZgIiMnQ43vrETQQx7IEAmOBA5/STgeMgmChL80FgSlRLbJP3FNPARWApN/YiEyIL6fwAlsP0GOErwfAhKlHjJKJtwhA0QO/CUoP1YCbAmeEiCBmGYYC/vvlwBeE7SoBYjveJcIMnaCki2NxkOQYMpcZMVm8sC1QInVZrgEDgIe59YGPCAQ2Cl3gRI/1wwB1g/C5ZcAjiM1H8iChQlvOP7Unlio8WGiIsSDfeGa5LW//Rjs7I72+wiElFApHVsIOEgg0xJbmOCIYGxnUUyElQDOmZUnQIAIVhwfIFjSac8/QRDHlb4eiLM/+PCdyBmBl3WQEAK+nWERqOrmuuAYx0sA28ExsU1i3FNBcN1wbSLdfrg2rQTwPfTb4J6MXw/pRiJsdT3XEvcStUIWgjRihlwD+0ntHm35fthmnkggL2B9akPeeOMNc8/5jw3n8Y9//KM53uw/Asr1x+RG8deWH65NaiIRovhpwVnGvpCf2MKIUn5UAjIA7BsJsCUTMgYyCKpB/TO4kfGTKDXakqwFCSCDJrONvwn5boIlVeVUqZJpUxKn9EQVJNV0BDgyNDJsSsNkipTSEkEwIUiX1XmOzlTsBwEGgWG72VY/lBJYx/8+GS+fIfOARx991FTTUoVKlSUZME0hBGsyHnpsE+DJsKjyT0S8BHBcKeXQB8MG43gJ6N+/v8mkOe47Ahkt20zGymyC/iBGyZdAQNU154D9IdmZ4yiRA0GO40zJ1sJ2EmQpAVoI9lRF+6vmIV4COOeU1vztsxwDfsMfQDlvbAOSghjS9wOp4Rpje0kEXqqoCTpWWJEAriP/FMqIE5/zSwv7jVgRAF0QsGk+4vz7xY6+HJwT7hPgmqBJxD8sNL9LzQr3BLU39jhzv3HN0EzEvlFzwDHnbwuBkZo5ajniJYCaMI4P1yYCG389Wwh4iJNffiBeAhApatVsU4GFbeZ92yzGsabGxn/MeNSV2jGkHLh/OR9BfYrYP+5BZIMaBPaF7+d4WDinSAL3CfkIpXtqyDiOfCZI2ihccGyRP79cWthHBIFjVVahQdkelYBdHEqEBGgyWDoi0TZKdS8BCGunlGLB+rFpbnoyET5nS/RkRNzcBHO/HADBlKpUMh8yU76XpgUSho9Y0AbPd9OZisBDRkIVZiLKKwFAtTdBlVINJXXa3f2PJVFVS2brL2WTgVIVS7Us8LgW28S+
2O0m0WZLFSrrUz1KZs5xSsTOSABSwTGjendHoNaC4EfVOIn94feBIIwg8L5/f/ibYEkpDWxNABmxhe3kOuF4WsorAQRBapT8bcM0uSAGBAAL1wHXF8eA88L5Iugia3ZbSZw7ZIcACVYC/G3KBB+uO84VAQF2RAK41pFXf58TzhtV+szRD+wXAdL/uwRq2rFJiKZ/u9lmalsopVLLEtTezbnjfoqXAK41tpv9QZISUR4J4FzS055zTL8MP9ToUfK3wmYFnjzDgpzRfGRrstgfgrGtQUsE+QU1GAR3alTsEyTIFNeHvykSuAeQrKBH/RALjgtNTwhBPJw3jgF9X8o638r2qATs4pBpcRNTNUrAI3MiEQio/qY91Q8ZCKVpzJ+SIJ8lk7alLTLW+Mf2yHDIrMg8qB4l8JCJUmogo7IJy+cmpjRFRzky9ERYCSCTLg9sN9/L58gYEQ9bY0EGTZWvXwIoNVPysCViShKsQ9tn/HaT2QNNDmRG/oAZz85IAFJBRrkzzzoT9BiwhVoEOhLaJg1KcPxNIPLvC4mSpu3ZjgSwTwQKC9uJKFLDYyHYc3x2RgI4h0gAAcDC71NlzzEgYHDcKSFSkxC/vWTsnF/YEQngmi+PBFATQLOEvzmEDnmcR9q4gf2idO8PXmy3vfapUfNvsz3GXO/cE/HBle3kc0E1AXyea4SaAu5TW1sXD+txrSOwfvwSwO8Q5An2VhAt9F+gpG6PpZUA/3GgRgQx8zdncT3T/l4e+A3OF303wHYMpDnED50rqR2Mb2ZADijA0NRgr4F4uF/J0yjk+LddKR8qAbs4BB/a3igtc7OQOZGoNqP6j8wi0eAkVL1xY9JBiQDKTUYpgDZeC5kiGTyde8jwaDskw3FVmZMxU6qKL8H4oaoSCYhveojH395oITMloNiSD8EB6fB3bqIKlH3hGWRg+8mA/dXY8fAZSod0SAv6XdgZCUBIeNqC14megIgn6PeRN44Z+0/AoZRJCsoYbbAsrwQQIAiW9ikISxgSwHXJNhJsEJkgbAmwvBJAbRDt5fG1VvGwbdRwIEz+qmiEjJoh2xYeJAEcY+4J7iFXNTTfwX3kvyd4oobjS/AK6hPAOaATIdsQ37/GwnHjKR/bpGWJlwDub76HQOqHvi1cm9zfUF4J4JzFD3TFsQsK0tQ4cb54UgZsTUD8kznUsnGM/D38afLh2qITYyIRAvrqUFihCVLZcVQCdmHIAKmCpwaADCMeSs4EQjJWAgi1ApTEKJnyHlXjNA2QKVDqIaOlcxRtb/SKp2RMpyhuVFtip3RCJkFmQpUh8kFgJKjQ+5jvImOivZ0OQvwmgYhMgtKF7ZhHGze9lwkyBGYCeFBVINtNtTLSwHbTz4CMkVK+3SZKW1SH2n4Ddh0Cui3ZIkZ0XKRDF0LAdnMM6KHPqHhAgEYayNQYCIUaATJ2ZIrtgJ2RAKCqnEBJYCRocEzs0xVBUBvDfvO9SBm/y7lm+20AprqXUhTVpOw3xwexo3ratg+XVwK4Lthvtol9I3BwHuOfDtgZCeCaAQIvAkkJmWPPOeAzBCvbVFJeCSCgEfjYX9rEOQ9BsG1UWbN9BFOOI23Y7APXDdc9BEkA1yOvqeqn5olSL9tMrQw1TBxb1uF88xs0r9j7hpoKrm/X0wHU5vA5zgPHwZ4/+lYggew338M55/xxDXKfc51aCeBckujYSZDlHrRD7VL9zj1qxbO8EkBtGxLvFyw6HXN/EYh79eplfoPfp4YLGbPHkWPLeaHzIfkP/WG49rmGEDG775xfmrMQT64L9o18wOYFfrgPyaf8TZtK+VEJ2IWhJEzJkBKFzRz9EEiouqdEzs1OtSI3IoGD2gMyKBvcuIlpDiBQ8n00FSAQSEB8SYXexlSB8l2UyAmCfI5AZEuwmD0ZGzcvGQW9hJELO+Y+pSyqaMlsCOBUiwaVktluHuUiw6BNmcyEYMh2W2ngt/luqhopMfB9ZEz+zmVAYCO4k6myTWw/JR5/ZySOAxkdmSwZNvtG6dVud4cOHcz32xoFW63MMbMSQKmJz1AKtHA86JFOps/3ElgIavHbaKGJguDI8eMYsT77STCwx5h/CUQEM7aB88p20/RhZYsMlKBK8LOwnRx79s1CjQzXB9tGoq2cYMHjavy+BbnjnPure+nXQcChc5cFCaC6m2OAgAHbi7gQUAlGfC99PDgntlMrAYHH2PxPehAMKS3S38Fe5xx3BIPzyPYic0EgAQQarlc6Q3LuCVJc+3Ras7BffJdfboBrjHONaCMJHGOCNr/tf9KEv6mV4Z7huuL7kDPOma1FQBA4F/Y1+4JkIBR8L4nr1z51wG8jZNRk8L1sH9cf77MfCKu9FpADZJXrhOPB/c2jkLapCxADlvklAElGXhBjCzUVnBt/8xXHhdoXtoXjwDXANnCN+o8jAkFBgj4wbC/3CutzLPznlLyC9RAlew5ZlxTfTwKx4V7xd4xVyo9KwC4MNzNVgYna0shkKIWRQfA3Vf5kQGTafM7aO/A3HQPJtAliGDvr+atQ/fDbdh1kg/VshmQhIyMA2HUoWdhACQRGtocAxA1ug7qfoO32t/0DGS2ZDPvJNpHxJ9pujpXdbr6L9fyDHAH7Eb/dNvjY/bbCwv6w7QQqC+vyvfG1M6zLe2wf38vf8cfMwrHgO/luuz7bEXSMyOhZzvaybbatGpAtjp0/4wfOd3ymyu/xPZwPtp/v4Fj712MdfsN/zNgvfsN/DPisPQb22FnYD7u99tjZ7WU7+S7/97OMfbQ1Chb2wb+9QXDsECOeCuE7OOd8Jv4aCtovP6zPZ9lmto/X8fvF9/N7rMf1wfaxzXY9rj0+G/85zpf9bo6H/1yx7xxDlrGfttqcJjc+54fftNc23xO/L2wzy+2xBq4/viv+3FFrR2dgC9vMOqzLPtrfiK/G57U9t2wviX2LP94cC97nu+x6Nvn7eVAIQHL9kqLsGCoBSrmwEkAvf3+GkA4gAZS84zNFRbESQC1RIuFStoeaFmolbLNXZUGNARKAWCg7h0qAUi6QANqgqdJMt2BK/wDkJb60oSiUSOkbQtV5olK+ouzKVIoEUAVGGxnthjw/6q9+osqKNiTadOn1qXaeGlD6p02R9tn4Kr5Uhx7cdO5LtxoMJXqosqZPCb3SNa9RMpGkSwCdr+gswjO8PBZFydIO+Ur7JJ2j6LlNxzMSvUnj28gURVEURak4SZcAevHSU5tSPiVKHhWhhzptczwywoAkPI5CqY0BJugZ6n8cSFEURVGUcEi6BPAIif+RK0SAx7XsWOE86uHvyUt7LrORKYqiKIoSLkmXAJ6nZVAO2z7L4BoM1MFgH7Q58yyyv48A7/NeUJMAtQk8qsJzozbxzLv/dUUTneEYkCNoWWUkjh2DrAQtq4yUSseH7WB7gpZp2nqugt7XtDWl2r2eSoljQ94TtExTOCmK/ItzxngQrib1pEsAj5QwqAelf5oFeNaUvgHsPAOP0InLD0O0sjNBO8FzpDwiQg0CiUFcGGCCfxnhq6KJZgkmj+EZ1KDlyU6M2sZ0nxwn/g5aJ9m
JoUgZGjRoWbIT4+QzyE3QskxPjCyIUDNITdDyTE/2Xn/nnXcCl2d6Ih9mLIWgZZrCSYwESQfmoGU7k4iJdOTmUU5/wTqepEsAMMADw8QyfSnDwzK6FiOQ8awu5uLvpcsodQxz6toJC4NoMFJVmI/6MFRnKnVMZD54/0hklQ2jEqbK0wI8Asj1pARDrZuSGGa19A9WpfwOQy/HD8akhAuDZNmRRsOC/JlJplxUigT44WkBhnxleFTGzmbIUP843wzpSam3PPAsOxIQVlBCPBiPmqcWUgUEKn7o0sqEwMJxTwUYOc01PXEmg1gztasGuWC415mDIX7kRGUryDWjGSrRwSikzKUQJhRiU04CGAubzIixAAiwPA5IUwCZE238SABVuvzN5DGMs82TA+XBSkBYz4NTA8A2plLGkIoS4B9/vDJRCUgMtWPcd/osfDDc60hAoiG2Mx2VgOhhOOSMkAAGCKIfAJNGMDkIPf/9Nx7VTixnwpMLLrjAvC4vKgHJRyUgPVAJcKMS4EYlIHoyRgKiRCUg+agEpAcqAW5SUQLYJvIy8rXKTPS1oa2aplv+DlpHU8USx5XJkZCtHTnGDOHOKLuJ+sypBFQQlYCyUQlID1QC3KSaBJCpM6QxzaIMrc59X5mJOfxpwg1apimcxPHlOActS5SY6pl/yYODREAloIKoBJSNSkB6oBLgJtUkgDyMDJ4e+fSX4rxVZuIe59gELdMUTuL48oRA0LJEiVoAHpUnLgRduyoBFUQloGxUAtIDlQA3qSYBBANqAFLlfFFFTcBRooNzzXHeUfgc14p/pF2LSkAFUQkoG5WA9EAlwE2qSQAZOo9Ks12pgEpA9HCP7owEcI0wfkzQOA4qARVEJaBsVALSA5UANyoBblQCokclIARUApKPSkB6oBLgRiXATSpIAHOmZGVlJewJn+6oBISASkDyUQlID1QC3OyKEjBn+ToZNnu5/JZXEHtn53FJwOTJk82cL8wBw5wwBGuedy9r23v16iV333235OXlxd4RycnJkXPPPdeMIXPssceaxN8Mlc7jcwQ7GD16tFx77bXmno+H/JqxZphzJp4ffvjBzGTLSLQk5rFhrhn/SJo8kfHII4/I6aefbgIoE/sQTP3rsG/MW8M63FdhoBIQAioByUclID1QCXDDvb6rSMBvS9fKoz+MltNe6SUnvtRTTvX+vf+bkTJ18c7fp4kkgIB51llnmaDJvdezZ08z0dCPP/5YZs3Br7/+aia38Y8IO2PGDKlWrZqZWp58jgnnGDCO5+H9MMx81apVzQA78fzyyy9y0003me9BUPy8+OKLRlTYzhEjRphAfuSRR5oJtoD8FQl56KGHTABFVJjYhyHt/XGAgZPYZ8SHSX/CQCUgBFQCko9KQHqgEuBmV5GAuV7p/6aPhsiBtbPk4HpdSlPVOllyxbsDZfqSnZvnI5EEtG/f3pTU/YGcEjPXGyxevFgeeOABOfTQQ02wbdKkicmfp0+fboLxbrvtJnvvvbfsv//+pqqfxyJPOOEEM/FNPKeccooJxuTze+yxh/zpT38yn+W727ZtG1tLTA3Bd999Jw0bNjQzrvphhFqms/cHW2bas7UGXbt2NdtpaxzAvz8WBk9iRNvOnTub6esZDr+iqASEgEpA8lEJSA9UAtykgwQsXbtB5q9YJwvi0sKVhbJ2w9Yg9Xq36V7A7yKH1MuWavV/T4d46aC6XaR+u4myOdamvmJd0XbftXhV8ORriSRg4MCBcvDBB8s333xjqvXJg+02c81dccUVUrNmTbMvVOHzmsDMOgRqagIIoJTouTapCTjmmGNMLQFiQfMA9zX9AI444gj58ssvzd/t2rUzvztlyhQz8Y49b7w+9dRTze/xd/Xq1bfJv5mp9rLLLjOD8vD9DGNPlT5T7wK1A9QwfP7552Z/yNv85wCYUO6FF14wNQDs44MPPmhqLiqKSkAIqAQkH5WA9EAlwA33eqpLwM0fD5Xdn2wnez/bqTTtWbOjCe7fDpkrGzaWyH1fj5T9anXeRgBsOqB2ltzkfceS1VvzszptJ8jfn24ve8W+a49nOprmgyASSQDHq0WLFqbqnfb7W2+9VT755JPSGfGqVKkiEyZMMIl2/VdeecWUoBkit1OnTqY63V+LQP625557mrlljj76aDnkkEOMRJAHIwdIAPDd1ADENwcQnJ977jlzvcPVV19tmiYsb731lvzlL38xQkGiBuKiiy4ysgH8DgGd72Z//vOf/5hZbP3zJjA4D00GCAOwv9R2kP9UBJWAEFAJSD4qAemBSoCbdJCAV7tMlRs/GiK3fTasNN366VC576sR0mdanuQXFMvt3nuJJKCKJwHXfjDINBnAV4PnyM2fDJFbY991yydD5YlWY82yeBJJgIU2ewJO06ZNTQn94YcflnfeecdU2xOECPakk046Se69915ZsGCBdOzY0bxnAzBQE0DwpZqdfJxBk2x+TjW9lQD6BBCoGVrZwhTy/BYldKr1s7OzTcdDah/sEwXUBPCaeRCIF/zejTfeKJdeeuk2U8bzm+QlNB/wO/fdd19pgG7Tpo1phuB+4nfoM8DvVnTyH5WAEFAJSD4qAemBSoCbdJCAsigsLpGnWo+T/Z5PUBNQK0vu/WqkrN2w49dAWRLghyBJCZvgaKvjCbD8y3Vo94n1qIqn34CFvgL0MQgKqEES4A98SAX9BuioSEndpqOOOspU/4PtE0CssJCH7b777iZvDYIai3333be0nwB9DnjC4LzzzpNzzjlHLrnkEtOEwFMG7N/OohIQAioByUclID1QCXCzK0gAdB6/WI5t3N0EfL8AHFgnS45o2FVaj9gayHaURBJAiZoOfVT/A3lwgwYNTGme9vaDDjpIPvvss9I8mX2ixz7XIz30Dz/8cBP4LVYCgu5jvwSQB5LX2+AO999/vzz55JOlfQRItOvT6a9u3bpmHZ4OuO6660qDLf/Wq1fPfBdNC7aZgr+pPWA5tRs0TdAkQMfF/fbbzzRvEBeofaDj4HvvvWeaHjgeO4tKQAioBCQflYD0QCXAza4iAeuLS+SzAb/JiS/2kH2e80qwz3c2/x7TpLu822umrCvaufOfSAJoF6ckfPPNN5tgy7+UkOnFTxDluXz6AFDlTn+BG264QVq2bGlqBgi0VM3zTP9dd90lI0eONBJAcwKCEA/B96OPPjJ/28+ef/75pnc/HfuQh59++sks9/PVV1+ZvgU0LfD44t///ndTG8Dz/2wvtRFsE4wdO7Z0f9he+gTwG3R8ZP+feuops348zABIH4NWrVrF3tlxVAJCQCUg+agEpAcqAW52FQmAok0lMnxOvrzbe5Y07DBJ3ukxQwbN8krHG38f7GZHSSQBlIR5jp/e/ARKnrvnKQDbBg+8pnMey2nrp7RslzPOAJ+hhD9nzhzzO1yn/gGELLTxsw5wXAi8SAaJ5gOe6bc1En54j6YCtpWSPIGa3/viiy/M30OHDi09zpx/xMa/P9Ro2OX8Dq/joTYAcfHXauwoKgEhoBKQfFQC0gOVADfc67uKBFg2lmwxjw5u3L
Tz32FJJAFKeKgEhIBKQPJRCUgPVALc7IoSECYqAdGjEhACKgHJRyUgPVAJcKMS4EYlIHpUAkJAJSD5qASkByoBblQC3KgERI9KQAioBCQflYD0QCXAjUqAG5WA6FEJCAGVgOSjEpAeqAS4UQlwoxIQPSoBIaASkHxUAtIDlQA3KgFuVAKiRyUgBFQCko9KQHqgEuBGJcCNSkD0qASEgEpA8lEJSA9UAtyoBLjZGQkgX2DWvqABfJIBA/0wx0C6wIBD/kmMyotKgA+VgOSjEpAeqAS4UQlwEyQBvMc0wkyje8cdd5jZAZs0aWJG8gO2/49//KOZK6Ay+Pe//y21a9eOvUoexB+GTWYeA+YzqFWrlhnOmHMKBHtmWGTZnXfeKY8++qgZvZB8lGuQ9+zxZDbGe+65x7xmaOVBgwaZ7/CjEuBDJSD5qASkByoBblQC3ARJAPPqE2ivuuoqMwwv8/Az1j7z87Ns0aJFstdee5mhgSsDJvRp3Lhx7FVyYFpk5h04++yzpX79+vLuu++agE8gf/PNN8063IsnnHCCmZuAOQ+Y3ph5D5544gkZN26cOY5MusScCMSz559/Xj755BOzbtCwxCoBPlQCko9KQHqgEuBGJcBNkAQwqx4z8hHkLMzqd+CBB8qAAQO2kwCuQUrEF154oQlMjRo1MpP6WMhnCYhMBcykPYiFrR5fsGCBCerkN5SUWYepe/kMAZPXd999t5kbwMKUv9QEIAKnnXaaCcQTJ06MLd0K24PEnHHGGfL000+bIA7sG6+ZsOiFF16QE088UcaPH2/yOt5nEqFnnnnG/DaTCnGu4KGHHpLjjjvOzJdg4wb3HEHarsNxYHu+++4785rlBHgmSJoyZYp5D2jKOPPMM40YuFAJ8KESkHxUAtIDlQA3aSEBnDsCcVAq8U0O5J3rwHVI/vX4PoKsfzmfDaC8EtC2bVszPfCkSZNKJcCWXtu0aWOmA6Z03KFDBxO4n3vuOZNfM6EQUxAzTz/rUfKltPzhhx+az86YMcPk7cxQyHKqz5nel5oHStxMHnT55Zeb6nMrFgR3ZiRs2LChmXyIkjciwXTB8PXXX5uaDGYZ5Nw/9thjcvHFF5v9RAaYTRChYYZBPk/+QtMH2810wwhA9erVTakf+WBiJGYr/Pjjj833J8JKABMUWdj+ffbZx0xmZCFvRU7Kyu9VAnyoBCQflYD0QCXATVpIwA03iPzjH9unKlXEi4qxlTwefDB4PS/ISKxK2lCr1vbrHHtsbOG2BEnA0qVLTaD829/+Joceeqjsv//+8s9//tMEbq4zSu9IAAGSqX8J2AR92sSha9euJkgzMyB5CCVhZvsD9rt58+Zy2WWXmaDNsTjggANM6RlhKCgoMCV/Ssrk+0A7PEHd1gYwRTEB2tYmUKJGImwJ/NRTTzXSwjln3xANlrdv3950ZuRvSv02/2cb2B9mTQT20dYSIADMMLjnnntuE8i559jufffd10xdzOyIbD8S8Pnnn5ttW7x4sam1OOmkk7bpRGklgFoFFyoBPlQCko9KQHqgEuAmLSTg5pvFi0LbJy+QypdfxlbyqFEjeD0vSMvbb8dW8qhTZ/t1vNJ3EIlqAq655hqpW7euKfUTRCk1V6tWzVxrBCaCJnkWgR5hsAEYCHjIA00HBGjybn8gY9rhSy+91Mzxz3chGbaTIXkyHe4InhZ+k6BJLQRQ6n/22WfN38Axvf766+WNN94wgfeoo44ywZnSO6lq1armX2oZ2I5zzz13m1I9pXW20X9OmIqYIMu5shIwZMiQ2NKtnQSRITpQ0oRAXwmguePvf/+7kSC2gSYSPue//lQCdgKVgOSjEpAeqAS4SQsJoATN+QtKO7se1f/x6wTgag6gbd7PFVdcYdrt6R+w9957mzyLfUECqHq38HmCIMGP4I4w8J6FJgOq52mLtxJg+xeQJ9fxJOYGakdikFcT8PwSQOc6C/kD0sBji9QuHHTQQaY2AhkhOPPb1FiwnywnMFNat9BGz/5wL1mofaBUzz7a5gDbhOGHJgzWsxLAdtKMgozY+MK9yXG2qATsBCoByUclID1QCXCTFhJQiZS3TwABlWBXo0YN0xeAoMkjg7TT80gcVfh8F/tFx78jjjjCBNzCwkI57LDDSpsSyMPpFU/JnWYHahJ2VAJoDqBfAYEWyKtoPmjXrp15TXU//RBsHwL2jyDNbwdJAPtL1T5NCFTpk+898sgjph+D7ffw3//+1/RVoMnAXkt8Lx3/qO632xLfJwC4R1UCKohKQPJRCUgPVALcqAS4CZIAAuYFF1xg2u3pRU+NAM+200bOfcj277bbbqU93qkup0qc5+KbNm1qAiECYdvsP/30U/NZ2tnpcU/wsgGbIPvXv/619LvIk2mv57ctXbp0kWOPPVYmTJhgXtM/gI6IdPh75ZVXzKOL9N637e509jvnnHPM9tD/gA6EiAe/RfMGvx9fqn/xxRfNNvIvYyKwD8iClQA6FNIBkc6DNFe8/vrrZl+plWCfyCvpE4Eo0LHQT7wEEHt40mDEiBGxd4JJSwnADGl34cDTQxPLs9B+QpURB48T9+qrr5qRnzCvslAJSD4qAemBSoAblQA3QRJA5zyeaac0fN9995kAS8nadoyjhE3PfTrDWRjspmbNmuYz3377bakAAHk8eT1BmQDfs2fP0nyf2gDkwH4X1zM99IkjFmoJEBFb2qYUT78CYgw1E6+99pop4fsZNWqUeVSR5TxOSLU91wDbTund38kPOB98L9uPtNC/ANnwfy/xh74C9EfgmCAWv/zyS2k+iQTQJGE7GFriJYA+FkgSMdFF2kkAJ4p2GWyJ9pqbbrpJbrzxRpk9e7ZZzkmjYwnVPDyHyQAK2JhKwFZUAhKjEpAYlQA3KgFugiQgE6Hdn3MDBH6aHBAgv8zsLPESUF7STgKwMdpiuMABKaBKBeuCrKws0yPUVq/sCCoByUclID1QCXCjEuBGJWArFFJ5IoIxCYhjjA5omx8qSsZIAFUop5xySqkE0CuUIRM5uEA7DY9u0KZCbQHVIeW9MTmAKgHJRSUgPVAJcKMS4EYlYCv0J6BvA4VV8ho7wmAYZIwE0I700ksvmUB/zDHHmH/fe++90guM50VpL+F5UjpOsANMnhB0M9BEQPsKGRuJIIAEcCBZxmcqkvhOMk4yhqDlyU7AQBi2t2jQOslOSADtZ2Ec74okfp+bgBuzsrclFRPVlVzL3GdByzM9ca8j/BQggpYnO3Et01fKDqzDNV2ZyUpA0LJMSkEErbczieNrY9eOJK4Rmil4vNF/DQH5c8pJAEGeQRjo8EemRA9LNtIGNm5Ge7Gxc/b5Uf8ADBZ6TfJdPLZBYsSp3Xff3XQY4YYOI1FDEfR+ZSRKKtSU0AuWv4PWSXbi+HAeg5YlO7EdqXS+Ui3psXGnVLqWkVk6hiHY1LRVdkqV7djV084cZ2qN6GzPOAb+a4jBjZo1a2ZGTXSRdAm47bbbTG9JW+1GOz49M+lFmQjGoo5/phKQBXqN0huUxE3Dc6lYNFX4/EZFEqUCMgUOdNDyZCdKc
/ShGDlypPk7aJ1kJy40zkEYx7siid/nkSXMt7K3JRUTJQyuZWrigpZneuJeJ+MkQw1anuxEtTNNf/xNqY4CUWUmWxMQtExTOMnWBAQtS5S4Nrh27dDM/ryP5gUGQ0q5mgB6/fNUADsLVOHTuxIRAHaCBNQGENgZipHMvSz4Tu0TkFy0T0B6oH0C3HCvp1KfACSf5gDanMnPkLfKTIg+ghS0TFM4iePLI5DEsaDlQYmaA/qO0BzAPR5PSvYJ+PHHH82ADowBwLCQPPvJqE5kUPDzzz+bQRuYxIHnSBljmYEWgnYwHn06IPmoBKQHKgFuUk0CgGDA/Z4KieZahgEOWqYpnMTx5TgHLUuUqAFAFBM9opiSEgAEf8aWZqQlagEYFMJCUKHJgAEYGE+AwRgwnvKgEpB8VALSA5UAN6koAcB2cc4qM1HtzCA/jKLH30HraKpY4rgyLDGTKe3IMeb6cJGyEhAVKgHJRyUgPVAJcJOqEpAqIAH+yX2U8KFNn8cPw0QloIKoBJSNSkB6oBLgRiXAjUpA9KgEhIBKQPJRCUgPVALcqAS4UQmIHpWAEFAJSD4qAemBSoAblQA3KgHRoxIQAioByUclID1QCXCjEuBGJSB6VAJCQCUg+agEpAcqAW5UAtyoBESPSkAIqAQkH5WA9EAlwI1KgBuVgOhRCQgBlYDkoxKQHqgEuFEJcKMSED0qASGgEpB8VALSA5UANyoBblQCokclIARUApKPSkB6oBLgRiXAjUpA9KgEhIBKQPJRCUgPVALcqAS4UQmIHpWAEFAJSD4qAemBSoAblQA3KgHRoxIQAioByUclID1QCXCjEuBGJSB6VAJCQCUg+agEpAcqAW5UAtyoBESPSkAIqAQkH5WA9EAlwI1KgBuVgOhRCQgBlYDkoxKQHqgEuFEJcKMSED0qASGgEpB8VALSA5UANyoBblQCokclIARUApKPSkB6oBLgRiXAjUpA9KgEhIBKQPJRCUgPVALcqAS4UQmIHpWAEFAJSD4qAemBSoAblQA3KgHRoxIQAioByUclID1QCXCjEuBGJSB6VAJCQCUg+agEpAcqAW5UAtyoBESPSkAIqAQkH5WA9EAlwI1KgBuVgOhRCQgBlYDkoxKQHqgEuFEJcKMSED0qASGgEpB8VALSA5UANyoBblQCokclIARUApKPSkB6oBLgRiXAjUpA9KgEhIBKQPJRCUgPVALcqAS4UQmIHpWAEFAJSD4qAemBSoAblQA3KgHRoxIQAioByUclID1QCXCjEuBGJSB6VAJCQCUg+agEpAcqAW5UAtyoBESPSkAIqAQkH5WA9EAlwI1KgBuVgOhRCQgBlYDkoxKQHqgEuFEJcKMSED0qASGgEpB8VALSA5UANyoBblQCokclIARUApKPSkB6oBLgRiXAjUpA9KgEhIBKQPJRCUgPVALcqAS4UQmIHpWAEFAJSD4qAemBSoAblQA3KgHRk1ES0LlzZ7npppvMxt16663m5vMzbtw4ueeee+S0006TGjVqSE5OTmyJG5WA5KMSkB6oBLhRCXCjEhA9GSMBBNXjjjtO3nrrLRNAXnrpJfN64MCBZvnUqVPlwgsvlEceeUSysrLklltukfPPP1+KiorMchcqAclHJSA9UAlwoxLgRiUgejJGAho3bizXXXedybCBC+viiy+WFi1amNfIwaWXXiqzZ882r/Pz82X//feXjh07mtcu1q5dqxKQZFQC0gOVADcqAW5UAqInYySApgCq+cmQli5dKm3atJHTTz9dRo8eLVu2bJGHHnpInnzySSkpKYl9QuTaa6+V+vXrx179DsF5/vz5JjCSaEbYe++9zfciAgUFBRVK1Cx07drVBJeg5clOhYWFMmXKFBkxYoT5O2idZKeePXuazCGM412RxO8vWrTISEllb0sqplWrVplrGWELWp7piXudPGn58uWByzM5kdcQnObOnav3VkSJ47pgwQJTiAnrGBMfu3TpknoSQHB/9dVX5YADDpAqVaqY1LJlS1NCwcJvvvlmadSoUWztrTzwwANGDuIZOXKkaTo45JBDTDrooINk9913N80IBKcwEtJCCSFoWWUkMnJObNCyykgcH2pLgpYlO7EdbE/QskxPXMN6bNwp1e71VErkqUhS0DJN4STyrzBjF7UAr7/+upx11lmxiBlM0iXg119/lXPOOccEssWLF0vbtm3lpJNOknbt2hlBoKNgfKn/3nvvNX0EysL2CcCCwoDt0eYAN9ockB4UFxebTJxmAWV7uNcRAEq9yvbQHJCbmxt7pURB/vJlMnBABjQHXH755dKkSZPYKzFNADVr1pTbb7/dvH7qqafk/vvv36Yj4Nlnn22Mpiy0T0DyUQlID7RPgBvtE+BG+wREz4LFedKlV3/ZEnsdBikpAVTtX3XVVTJnzhzzmqcBLrroIqlTp4553apVKzn11FNN8CXDolpjzz33lLFjx5rlLvTpgOSjEpAeqAS4UQlwoxIQHUvXbJCfRi6Q51qPkjtb9pC3ekyXEXPzZbNXQK4oKSkBBDAkgCcEGAvgmmuuMWMGzJgxwyznaYBatWrJBRdcYJoGzjjjDHnxxRfNsrJQCUg+KgHpgUqAG5UANyoB0ZC7ar08+v1oOahuF9n3+c6yf+0u8q/nOslZzXpL+7E5sqmkYiKQkhIAM2fOlJ9++kk+/fRT00dg3rx5sSVbITPnkcCPP/7Y9B0ozxgBoBKQfFQC0gOVADcqAW5UAsKFZvDiTSXy/C/j5YDaWXJwvWypVv/3tN/zneTs1/vIpEUVy1tTVgKiQiUg+agEpAcqAW5UAtyoBGxPiVdKX1VYLCvWFcsm7/qBNes3Su9pefLrqIXyzZC58kHfWdK823Sp136CvN9nlixZszWWUAPwxI9j5NAGXeWQOAGwiRqBzwbMrlBtgEpABVEJKBuVgPRAJcCNSoCbIYMHydK8XU8CijdtljwvMM9aulbGLVgpA2cuk+xJi+XH4fOl99Q8KSzeer/MWb5Oav86Qe79coTc9NEQufK9gXJZiwFyTvM+8szP42TRqq1PlcxeWiCXvDNAjn6huxz5Qjep3nBrkCegX/buAJm8eI1ZL3f1ejmveV85sE5WoACQ/vV8Z2nScbKsK9r5e1YloIKoBJSNSkB6oBLgRiUgMTleqfX9dv2k25jfpGhTmH3XK86GjZtl2doN8ltegSxeud4rkW/dvoUrCuXrwXOlRc+Z8lLWFBPAH/l+tNzoBfC3us+Q1V6JHUbPWyHnvdFPTnqpp5zopeNf7CHHNukuh3pB+L/fjpLFXrCGaV7wPuXlXiZok6rUzpKqdbrIYQ2y5cFvRsq8/K0xZ/naImncabLUbTdBmnWdJh/1/U1+GDZPOozNkeFz8mVtLKDT6W/EnBVy4os95eB6XYIlwBOHt7pP94751lqGnUEloIKoBJSNSkB6oBLgRiVgeyghN2g30QuKPaS6F+yO8Eq1573RVzqOz5HydlxnvRIvMFOlvbFksxSTvKBW5AVvG7Bh9fpimbhotQz5bbl0m5wrv4xaIF8OnCOvd5smPacu
MZ+DQTOXy9UtB5kS+AkvdpejvNI2Je6qdbtIPW9bbXBnveOb9pAjGnXbJrHeYz+MNj3yYYr3mxe93d8kBOG+r0bKk63HSt22E0xtwKrCrd+3bkOJ2bYx81aaWoO81RukYMMmWV9cYvbHHg/+YV/ZN/abYE9KdLhu/niIHOJtU7wAHOy9V71htvSdvjS25s6hElBBVALKRiUgPVAJcKMSsC20dT/7yzjTY/2QuABVvUFXaT8uR/LXFcuygiJPFopk7vJ1MiFnlfSfsVTmeqViGxQpaTf1SsbP/TJeangl61s/HSZXvDfQlLxpL7el3F9HLzCBj6pzSuGHeb9xuCcdBO0XO0+WglgJuv+MZXLaK71MqZx/T3+1t+lA9+83+5lH62iThwX5hdKi1wz5uB8l8fnSadwi6ett20hvexZ427cpJhWVzfDZy7196GVqA3hCgA6C7DN/0xSw1hONiqASUEFUAspGJSA9UAlwoxKwLT2mLDGB6JCAqmqC9DGNu8vJXiCnYxvV1nvV7Ch7P9tJ/v5UB3mv98zSkvsvIxfI0V5JnVL5yS/3NAHvzGZ95FQvgL/vScD6jVvniBnqBcM7Px8mD3832vSYb+KJw5vdZ8iH3jqDZy03pW1Y4wXFqblrZMGKQlNKL0mRYF4RRszJ9/Z9uHks8OSmXeXid/rLOz1/b7KoCCoBFUQloGxUAtIDlQA3KgFboQmgx+RcueuL4abdO14AbDq0QbZpGrjhoyFyyydD5Q5v/Qe+GWWeee86Kbe0R/vsZQXSarhXEh+/SPpMyzMBb/Ki1TIvv8CU2hNXlGceo2YslK879ZVVIQR/i0pABVEJKBuVgPRAJcBNJkoAbfS/LS2QRavWb2239uJxx3GL5C+Pt5O9vdJ9UPAnUWV/TONukj0x1/Rc3+xr21d2npX5y2XQwAyYOyBKVAKSj0pAeqAS4CZTJIB2/OFzVphOb407TpKr3x9kOsEVFm+tlp+/fJ3UaTNBav48Tg6sG9xrHQk47dVesrygfIO4KeVj2bJlZsrmMFEJqCAqAWWjEpAeqAS42dUlgA58L3WeIvd/PdJ0pKMN/x9Pt5czmvU279PL3cJodjQLXPvBINnn2W1rBBCAA2p1Nr32lXBRCQgBlYDkoxKQHqgEuNmVJGDNho0yYMZS0ymPKn+Yl19onmmvUruzGeyGIJ49cbFMzFllSvQEfj/mOfa5+XL9B4Nl3+c6ywG1Gds+y3wHnfbso3NKeKgEhIBKQPJRCUgPVALcpLsEMKJd6xEL5Jmfxpkq/tNe7W2en89dtTXvorq//8ylZlS8RSvXS9Gm30v+LnJWFkqnCYul5le95a0uEzwxWFH6uJ4SLioBIaASkHxUAtIDlQA36SoBtPHzDD6P3B3TpLvp0c+jfTd+NNg8Zsbz/mHQd8AgyVmcG3ulRIFKQAioBCQflYD0QCXATSpLABX1VM8zyE2rEfPNY3izl27N4xhMpl7biaa3PmPYdxi3SHJWrDe99mkKCKvf/uBBgyRvF5w7IJVQCQgBlYDkoxKQHqgEuElFCVhXXGIG0Xkte6pc2mKAGUb2sIZdTTU/z94DcsDz9uuKSswwtVGhswhGj0pACKgEJB+VgPRAJcBNZUvAho0lpmrfzhi3cdNmeafHDPnn0x3kiEZd5aSXe8mFb/WXZ73SfruxObK0ILl5kkpA9KgEhIBKQPJRCUgPVALcJFsC6IyfX1BsButhytpXukyVK1sOkleypsTWEOk3Y5n85+Oh0rL3TBk5d0WkJf2yUAmIHpWAEFAJSD4qAemBSoCbZErA4lXr5fOBc+S/342WE5r2kL97pf2D62fLuc37SqMOk2JrpRYqAdGjEhACKgHJRyUgPVAJcBOVBOSu3iDDZudL3+l5sXfEjK1/QO0sOfqF7mb62ud/nSBfDZ4jo7zS/pr1qXl+VAKiRyUgBFQCko9KQHqgEuAmLAmgbZ856tuOWSjNuk6Te78aYWbPI9lZ4Rav2iA/j1poZGDe8nDyqqhRCYgelYAQUAlIPioB6YFKQFlskT69esjGop2/15netnHHyXLx2/3NdLt/frytHNW4m9zx+XB5r9dMKajg3PCViUpA9KgEhIBKQPJRCUgPVALcLFm9Xj5r011m5a6MveNm0qJV8uWgOWYIXUbgAyTgts+Gyemv9pZ6bSdI29ELZSwj9K1aXzp8b7qiEhA9KgEhoBKQfFQC0gOVgGCWrFlvAvnF7/SXU17Mln+/2VeebD1WZi5ZG1tjK/Tk7z4516x7w0eDzSQ8hzfsKtW91HZ0jlmH3vsLPRGYl79O1m7YtcbWVwmIHpWAEFAJSD4qAemBSsD2MFPeI9+PkoPqdJED62TJQfWypWrdLnKwl656b6BMXbzGrFe8qUQatJ9oAv6hDbJl/1pZct4bfaVBh4meGCyR1evDGZo3lVEJiB6VgBBQCUg+KgHpgUrA9lCdf0h9gn7cfPleYgz+R38YXTrs7gd9fpNbPh0mnw2YLTOWrDWj+YU5LG+qoxIQPSoBIaASkHxUAtIDlYBt4VG8p1qNlX2f77yNANiEBFz13qDS9n7IlIAfhEpA9KgEhIBKQPJRCUgPVAK2wqx6K720rKBIHvhmpOxXK1gCaB645J0BMj2ub0CmohIQPSoBIaASkHxUAtKDTJaAFeuKZWruGmkzJkce+HqkGX+fMfrrtpso/3LUBNzw4RBZUbDrt/eXB5WA6FEJCAGVgOSjEpAeZJoEFBaXyLgFK+XH4fPl8VZj5bimPWT3J9vLEQ27ypOtxpgagW6Tl8ixzMFfK2sbAWA+/kPqdZGWfWbGvk1RCYgelYAQUAlIPioB6UEmSEDRphIpjj2PT3X/te8Plt0eayuHN+oqN38yVJp0niwdxy2SBfmFZh1E4X0v0B/XpIf867lOckCdbNNHoHqDrmaO/hXrisx6ikpAMlAJCAGVgOSjEpAe7KoSQLX+wFnL5N1eM6V+u4lmtj3YtHmLNMueJq9mT906PG9+cJ7AML89piwxI/3d/E5XqfPrOPPcPzUFyu+oBESPSkAIqAQkH5WA9GBXkoACL/APmrlM3ug+3YzNf0azPrJnzY5yYO0s+WXUwthaUjpWf3nYsHGTtM3qLivWFMTeUfyoBESPSkAIqAQkH5WA9GBXkoC3e87wAn9vOaBWluzzXCe5/sPB8kGfWTJ8dr4sL9jJKvwtW6RXzx5SlEL3eiqhEhA9KgEhoBKQfFQC0oN0lIAtXmAePX+FNGw/SX71lfBbjZgvt3wyVN7rPVMmLFwlS9cUVXhsfu71KKYS3lVQCYiejJEAbuz45Kes5S5UApKPSkB6kC4SwP0+IWeVNO82XS5rMUCOa9pdqtTJkv9+P1ryvGAPdP5bu2FTqJPyqAS4UQmInrSVgMLCQrnppptk+PDhsXfcPPHEEyZQ27TXXnvJ/vvvL23atJGhQ4fKIYccYt6zyx9++GEpKipfFZ9KQPJRCUgPUlUC/I4/M2+tXPnuQDmsYVc5vFE3OcJLF73VX97sMcM8578D5YEdRiXAjUpA9KS0BBQUFJiMPij
l5OTIiSeeKP369Yut7Wbt2rWyfPlyk9jpr776SqpVqyZ5eXkm4F5wwQUmsBB4V61aZdYvb21AFBLQrVcv2ZBCGedv8+bJ6LFjY68qn17eeV8d0vGuKCu8868SEMzGkpKUkADu5ELvf8s2bJZRi9ZKh0lLJW99iVlWULxJLny7n5nRr1nP32TcYu/eN0uip1QCPFlStmfg4MGyxMuzlehIaQl47LHH5K677pJ77rlnm3TvvffK7bffLrvvvrsMGTIktnb5oXRCLQLfDV26dJEzzjhDvv76axk/frwsWrTIvF9eEAckICybJwMa/sEHUozgDB26bRo9GuvYuiKMG7f9OqQRIyiixlbymDQpeL1hw7gKYit5TJ8euM7CX36Rad6JLWXWrO3Xs2nh7+2oMn9+8DqctzlzYit5eFJntiVo3Zm+wVM8aWPfRrdsKQU9e277malTYyt5kHGMHLnt99g0cWJsJQ9P+MwxDVrPuxZK8aRQkKD4dbzfX+nJY38vGQoLRSZM2H490qhRXHxb16OWJ9E54dzZWqDiYpEpU4LXY989UTYQZGfMCF6PxH6CF3Rk9uzgdUj5+VvXQ4ATnTsS58HCuQtax0ubPHlEAjZ7MmA+E7COSfyWhW0IWofkv2a8wkDgOiSuT29fizZtlhV5+bJhwEAZ+G0naVL3c7n+zjfl3hrvytS23b1ryztmm0tkTr5371LzN8I7psMCvo9zZc8d+xK/3CbOvb8GMdE1Ta1aLL9AAjYMHBi8HtcM154l6BokUSPK8bBwjQetx/bYcwzcM0HrkfznONH9zn3szy/nzk28nv8ck0cEHRvW4/qMMaZzZ1nu5c+B63K9W8jDEt3v/nyBPJFjGrSeP18gj+UcBa1HvsA1ANx/vA5aj89z/wLnOtE5YXvs/c61k+h+Z//893tQXk3yXwts52+/Ba/HMfWOx3LveghbAigUhSIBBx98sFx//fVSu3btbVKdOnXk6aeflj322EMGe6a4o1C1vc8++5iSP3AAzjvvPDn77LPNv5dffrn09AJMopqApUuXSocOHeS7774z6dNPPzXbMt07KQu9i3vBggUVSnMXLxbZbTfvKHmHKS6VeGnpt9/K/CVLTCr2fjdovc1eWvb++zLfu5FJGw49NOF6+a+8Urre+tNPD1yPtPzZZ2VubL113jEKWoe0+qGHZF5svTWerAWtQ1p7440yzzuWrLfKE76gdUjrLrigdL2VDRrIloB1SBuOO650vfzXXzf7FrRe8UEHbT1+3nrLPv7YHNOg9Tbuuacs8DI11l3644+yyZPOoPXWeal/x45mvSXev8XetRW0Hr+zyLv5WC/Xu76KqlULXI/9YznrLRo0SNafcELC9Za0aSPzc3Mlx8tICs85J3A90tIvv5QFrOdlRAVXXRW4Dmn5O+/IAi9TX+hdy2vuvDNwHVJ+48aywAv+C717adXjjweuQ1rxxBOS7WXi87yMfUWjRoHrkNZ4Yr/AC/Bc//lvvx24DqnguutkoSeF/Db7FLQOiWtmkZeZftZ/htSv9UngOqT1J58sOZ5IL1qSK4uR7oB1SEXe/bPYyzTNMUS8A9YhFXuFgVwvA+SccO0ErUPiWlqSnS3zvHPc1ZOkjQHrkLhm8n7+2VwL/PbGv/0tcD2u9aVffGHWI3GNB61H4hxz7bPe+pNOClyHtKJJk9L1Ci68MHAd0qqnniq939fefHPgOqQ1d99den+urlEjcB3SOu/6ZD2+c5F3bSW6PwvPOmvr/nrrrXj55cT5wjHHmOuF9ZZ5hYdE38cxW+DJDust/eYb2fS//xu4HvlCjhf4+e087/7jddB6fH6xJzWst8Q7x8VVqwaux7nL7d7drLfYi0VFRx8duB77l/frr2a9HK+gUEiQjVvHrse9YdbzpHTdpZcGrkda3qKFTPbudeJdGLGLtNiLX61btzYFaxfeFpTNVd7F0NI7aUGUeIZzjHdye/XqFXun/Lz44otysnfz8x1AkwDt3fmeEc33bPUp76I+6qijErZFTfEyl0cffVSuvfZak6688kr5m3dzIhP0UahoGuoZ33wvs9t4xx2y2btx/GnDww/LVPoxeKVX0jpvW+PXIRU/+KDM8ALXUK/kMMRLq2vVClxv0/33y2/eBc86pPwXXthunS333CNrvIA9zgusQ70MkPXyPHGIX8+mhe++K0Ni6y166y0p8T4ftF5u8+YyxLuZWG+BJyybvEAQtN4y73zZ9eZ8/rlsfOAByfUu7KLbbttmvZX165v12OdZnigVezLiX27Tmuefl6Fe0GS96T/9JBseeSRwvXWeaA73Mn6O89R27aTQC2hB6yE93bzlQ71raJJXcil45pnA9Yr++18Z44kn3zfeyxTWJDgnnLvxXbvKMG8bx3k3J/sVtB7nbpInHcO862WMF3iWN20auB7Hf9ovv5j1Rg0YIHmvvRa4HmnmDz/IMC+DGenJxyIvWAStQ5rtBZzh3nojPKlZ8MEHgeuQ5nnLsrxjMszLDOd4nwlah7TIy4z4Lq7/md9/H7gOKde7Bkd44s82TvcyRLnvXpEH7xPx9nH+1TdLr9Mul5xr/iPLX35JRnv78PAXA+Q/tX+QvOtvkZKA71vRsKGM9oI/x2aclxHHL7eJ+4dzx3qsH7QOqaBmTfM9rDfCu3YSXdNcSxM9OeI6zPL+LfTOedB63O+T27c31wzfWfjkk4HrFXv3BOfYrOd9J9d40HpsD+eYe4l1V3hSHbQeaY4XSOx6S196KXAd0vwPPyy937mnE93vXE/2Pl743nsJjw3Xp72PR7/6qqwNyAdJyz0RZdv4vt++/jrh/b6yXj1zvfB95Ikc06D1OGYjvOuU9aZ619Z6r2AStB75wigvr+d+n+zdf7wOWo/Pj/XiE+tN9IQv0Tkp8rZngrecfRnXo4es8rY3aD32b7JX+GS9MX36BObVJPKFad72cx2wnUsT5NX2Whjg3SfZ3u/Hx6GdTaO97Xvfy8/P8iTNRbkk4FdvR370TloQdNr76KOPZI6/erAcUHWPPLxNaSMBdDrcb7/9TF+B8lBcXBxqcwBkexkNLd5oij/F90vmdfw6Nvkp73rUfcQv57MzPEscQ1VnjKD1bPLjWi++niVoHVLQej28C3eFdy79+xV/bOz78Sl+vUTHpjzr8d6ygoJt+gRU5Pts8hPGsfYT9noQtA5p45YtW5sDaIbwvR+f4glahxR/LcxZVyKdZq6QV/vOlVu/Hy/7Ne4tzfrNE1shP3tZgfSbuUzmr98s9Eoo6/vil9sUf+6C1iHtzHo0BxR4x6es9YDXQeuR/JR3vVS8ZvznZKAnUotWrgzcn509d4mOTaqvR/IT1jnJX7489OYA+uqF9nQA7fdhQvs/TQG5ubmxd7aHYM6TA6xbHuhEGMnTAeV8OiEZ/DZ7dmo9HeCZ8Gp/34hKZIWXSWnHwGA2btoUScfAWXlr5YWOk+U/nw6TI17oLn99qoOc8GJPeeSHMdJzal7pWP6pTmnHQNsmrGyDeTogz9c3QQmdlO4YSNAJWwKuu+46ufvuu2Ov6F+xyf
wOjwkS/GnPoDmAJwfK20FQHxFMPvqIYHoQ1iOCuavWy+Sc1aajH0zLXSP/fLqDHPVCN3m81RhpPWK+jJ2/0kzfm06USkCItYi7EvqIYPSktASceeaZprMCzJgxw3QGrIgU0Jb/17/+dZsdpvMfnft43PD444+XU089Va655hqzE5X5iKBKgBuVgPSgIhKwen2xdBy/SJ5uPU4ufLu/3PjRYFmyZus9gQxkT1ws03PXpl3g96MS4EYlIHpSWgKokrdt/iNHjpTTTz+9QoGRG43vi8+QqM6fPXu2TJs2TWbNmmUOyo6gEpB8VALSg02bNkp3TwJKSsonAZtKvGt/cq481XqMnNu8jxzftIccWCdLqjfMloe+HSnL1qbOPREGKgFuVAKiJ6UloGrVqjJv3jzzNz0O6W1IJ7xUQyUg+agEpD6bSrbIiNlL5bXvu8rouculZPP2NWubt2yRNes3Skms1o0peh/4epQcXK+Lqeq/+8vh0mZ0juSu3iDri0vM8AW7EioBblQCoielJaBKlSoyduxYM8ofjwKedtppps3eP/Jf2B2OdgaVgOSjEpC6EKiHzVku17w/SA5r0FWq1c82/9740RAZNXeFmUt/VeFG6Ts9T577ZZwc6QX7ft7fgDh0n7xEvhkyT5Z4gR9J2JVRCXCjEhA9KS0BtNNTG8C4/gcddJB5bI+/bWKsfyShslEJSD4qAakLHfTOa97HTMCDANhUtW4XOfO13vLEj2Pl1Fd6yf61OpvS/mne318PmRtYU7CroxLgRiUgelJaAhgSmMyeLySR0dq/Sdw8qRAIVAKSj0pA6lL71/FSpXaWHOITAJuo5j+0Qbac07yvPPDNSPly0FyZn18Y+2TmoRLgRiUgelJaAtIFlYDkoxKQeqwoKJaBM5fJ5S0GGAmIFwDSgd77TNU7dLZOCgMqAW5UAqJHJSAEVAKSj0pA6rC8oEg+6vebPP7jGPm/V3vJQXW7BAoAiSaAOz4fJktjc/RnOioBblQCokclIARUApKPSkDy2bR5i0xfskY6T1gsXwz8fbjuKYtXy9GNu8kh9brIrZ8OM1X9VPsnkoCaP4+Xoo120NLMRiXAjUpA9KgEhIBKQPJRCYgeOuotWFEoXSflypvdp8sjP4yWK98baAL+fl4wt235azdslD7T8qTv9KUy31u/w9gcObFpj+2aBA6olSWnvNxT+nnrKVtRCXCjEhA9KgEhoBKQfFQComF14UbzmB4sWVMktX6dIMc26S77Pt9Z9nimg5zzeh955qdx8v2webJ6ffDonQz48/OoBfJ/r/WS/bzPVambbf49y/ts+zE5sjFNxvVPBioBblQCoidlJeCHH34wM/21aNFC3n33XXnvvfdM8v/dvHlzM25AZaMSkHxUAsKBavkRc/Plg76/yb1fjTQl/THzV5pla9Zvkje6TZc7vxguXw6aI6O99xd6JX2Cf1mP79N0MHf5OvlpxFx55rOu8uuo+abmIBMfA3ShEuBGJSB6UlYCHn74YbngggvkkksuMf8yJsCBBx4oZ599thx++OHyz3/+U4499liZPn167BOVh0pA8lEJ2HkoqfecskQe/HaUnNu8r5l974hG3UyHPp7bp7QOhGtG6aO6n6C+MxQVF0uXrt1k48bKH9QrFVEJcKMSED0pKwEEQOb1Z1z/F154QR577DGT6fMewfaNN94wouCaEjhZqAQkH5WAsqHUXbxps+R4pfeO43PMID6WFj1nyv61skzwv/r9QfJa9jTpN2OprFxfXNocEAabNsbmDkiBkT1TEZUANyoB0ZPyfQIKCgrkuOOO225aXwIutQSDBw+OvVN5qAQkH5WAYBiSd9Gq9dLDK+k37jRJLn6nv1StkyWHNewqL3aeEltLZGLOamnnlfjzIn5UL6yphHdVVALcqARET1pIwGGHHSZdu3YtzUiY4peAdNJJJ8mwYcPMe5WJSkDyUQkQKS7ZLPnrireZSvenkQvMrHv7Pt/JjMn/f6/1lktbDJCaP4+T7InJrzVTCXCjEuBGJSB6Ul4CyDxoDmAegVdffdV0GKRz4HnnnScPPPCA5OVtnXikMlEJSD6ZKAFMpsPAPNOXrJX+M5ZKyz6z5O4vhsuzXoC38Pjd7Z8NM+PzMyTv2AUrveup8jrjqQS4UQlwoxIQPSkvAUBG8sEHH8jVV19tgv/FF18sjRs3TgkBAJWA5JNpEpCzcr18MWiO/Pe70aZ0/7en2ss+z3WSk17qKU+0GlM62976jSWmSSBVUAlwoxLgRiUgetJCAizFxcWycOFC0xyQSqgEJJ9dVQIKi0tk6uI10mn8Imk7Jsf05AdK/id6Af/wRl3lpo+GSJ02E+XTAbPNAD05KwsrtbTvQiXAjUqAG5WA6EkLCSC4vvbaa3LdddeZpwJgxIgR0rp1a1m1apV5XZmoBCSfVJKANatWysD+OycBDJwze2mBdJmwWN7uMcMr1Y+VK94daIbdPeHFHjI1d41Zb+W6Yuk2KVd6TV0ic5atS9mgH49KgBuVADcqAdGTFn0CEIDLLrtMatSoIVdccYV5nw6Bl19+uUyYMMG8rkxUApJPKknApLm58l3n3uZ5+vLAqHxU20Pemg3yuBf4eT5/j2c6yt41O8qFb/eX534ZL7+MWmj6AKQzKgFuVALcqARET8pLAIGVpwAIQNws9AugOaCoqEj+/e9/y8CBA2NrVh4qAcknFSRgUs4qM3PeRW/3lTNe7irXfjBIPuw7S9bGDae7dsMmM8Xuu71nyj1fjvTSCBk2O98sK9q0WWq3mSAPfjNSvh06T0bOzTcj7SUakjfdUAlwoxLgRiUgelJeAhgsqFq1auYm6dKli5EAIODSSZCLpLJRCUg+lS0BExaulH+/1c+MsndgnS7ev9lSpU6WHFIvW17qPEUKNmw0bfr3fT1CLvDWO+XlXlK9YVf5l+nM18PMxGdZVbjRpHSp4t8RVALcqAS4UQmInpSXAALhVVddJa1atTKBkb+5cXhU8KKLLpIZM2bE1qw8VAKST2VLwL1fjZCqdb2g75slj3SQ994RXrAf+ttyqdduopGEwxpky40fDZE3u8+Qwd77PNfPSH6ZgEqAG5UANyoB0ZMWHQPZwGOOOUbOPfdcM28AtQFVq1aVb7/9NiWeFFAJSD6VKQGz8grkjNd6mwDvFwCbKO2/1WOG6dHfZ3qerCgoNmPv28f4MgmVADcqAW5UAqInLSQAGDaYDoIPPfSQ1KxZMyVGCrSoBCSfypQA2u1PfaWX6cEfJAFMu0stAH0BMh2VADcqAW5UAqIn5SWATKRnz56BJf4hQ4bI8uXLY68qD5WA5FOZErBwxXo5q1mfhDUBDOLTotdM2bgp80r+8agEuFEJcKMSED1p0SeAaYNLSrYfBe3000+XXr16xV5VHioByaey+wTUaTtBDqRPQL1tBWD/Wp3l1Jd7yYg5W3v/ZzoqAW5UAtyoBERPSkvA+PHjpV+/frLPPvuYi2HUqFGliYyF+QSGDh0aW7vyUAlIPpUtAYzSd/MnQ2Vfr9S/nxf4D6iTLf96fuswvt8Om
Wse/VNUAspCJcCNSkD0pLQE3H777XL++efLH//4R7nwwgvl0ksvNdMHM3DQ2WefLU888YQsXbo0tnbloRKQfCpbAmBa7hpp2XumPPbdCLnxne7yQsdJ0ntqXulQv4pKQFmoBLhRCYielJYAbo5ffvnFiEDbtm2lU6dOpYlmADY+FVAJSD6pIAGW+bnLpEP3PlKyCz7nX1FUAtyoBLhRCYielO8TAASgVEYlIPlUpgRkTVwsPwyfVzqk7+pVK2VAv+inEk5HVALcqAS4UQmInrSQAGYPbNGihdxxxx1yzTXXlCYGC5o+fXpsrcpDJSD5VJYEFBZvkv98MlSqN8yWCTkrzXvJmEo4XVEJcKMS4EYlIHpSXgLIRBgb4Nhjj5XnnntOGjduLC+88IJJtWvXlgULFsTWrDxUApJPZUlAm9EL5dgmPeSGj4bImtj4/ioBiVEJcKMS4EYlIHpSXgKYO+DQQw+V0aNHmxoBMhV/0hEDk4NKgJiZ/+79aqTsVbOTdJ/ye8akEpAYlQA3KgFuVAKiJ+UlgNkC6Rg4Z86c2Duph0pA8qkMCeg4frEc17SHmQeA8f8tKgGJUQlwoxLgRiUgelJWAhYuXCizZ8+WWbNmSbNmzeSuu+4y7f+8ZxPLyhsoaTaYMGFCaZo0aZLMnDnTZOCAbPCdEydOlPnz5+9QDYNKQPJJtgSsK9ok//1+lPzzmQ7Sadwi8S6Q2BKVABcqAW5UAtyoBERPykrAnXfeKaeccoqcccYZcuaZZ8pBBx0khxxyiPlim5hUiGBeHurXry/HHXecSccff7wcfPDBsttuu0nr1q3NDfjhhx+a32M5IxF26NAh9smyUQlIPsmWgIk5q0wNwG2fDZPFq7c9LyoBiVEJcKMS4EYlIHpSVgIYKZBAbMcFyMrKks6dO28zVgBjB6xcubWHdllQsreJIYg///xzIxEEcAIuwZ9ZCQsKCuStt94ywlHeTocqAcmnMpoDxi9cZWSAGQH9qAQkRiXAjUqAG5WA6En5PgFRQMZ0wQUXyLPPPmte16tXT26++WbT8RCQhCOOOMJIQXmg86JKQHKpDAlIhEpAYlQC3KgEuFEJiJ6UlwACM0G6adOm0qBBA6lbt66p2m/SpIl5XPCdd97Z4eDE3AN77bWXTJkyRQoLC834Azx+6Oeqq64yv1eevgFsIxIQ5o2MBFgpSQXoLzF27NjYq8oHCUC+ombNhk3SYWyODP5tuSSaDmDVqlUqAQlAqJEAgp0SDBJAnyRlewYPHix5eXmxV0oUMBNv2BJATX6oEnDdddfJH/7wB9M34NZbbzWDBP35z3+Ws846ywwadNJJJxnzKC9PPvmkXHzxxebv/Px8UwuAaPj5z3/+Y8QgSAImT54sNWrUkCuuuMIk5jP429/+ZgLBsGHDzKRGFU00f3ADBC1LdmKfOL5kVmHtX0VTly5dZODAgZFuz6gRw+T7bkPl6CY95bb3e0mP/kNkxPBtf4/fZzvYnlQ5NqmUmO6ba5l/g5ZrSq17PZUS91PXrl1NgNJ7K5rEcR0wYIBkZ2eHdoxHjhwpLVu2NPHZxQ5JABMFvfbaazJ37lxjLTw58NVXX8kDDzxgMhdqBq6++urA6YbjoQ9B1apV5fvvvzev6QNw7733ytNPP21eW5AEah+CJIDqEzL9n376ySS2Zc899zRPGyxatEhycnIqlOiLwElhf4OWJzuxT4zTQNVcGPsXRqKmJKzjHZQWe987c848eeqbgfKv5zvLqx3HyaLcxdutx+/z1AqClCrHJpXSvHnzzLXMEzdByzM9ca8T6HgEOmh5JifuJwofFLr03oomcVynTp1q5uIJ6xjTfENcpFO/i3JLAEH66KOPltzc3Ng7W6G9nECNdSxevNj09qf9sSwI2Kxr2+8J8q+88opceeWVploXqGZmnfI+IUAzgDYHJBeaA+iQGSVTl6yV6g27yZXvDZSZeYmbHhBLbQ4Ihr4A2hzgBoFMpf4/qQQFD20OiJYomgPID0NrDiBY00kvPiBTMj3xxBONBGAfRx55ZLk6H51zzjnbtf9ThcHjgS+//LIJdo8++qicfPLJpWMIlIU+HZB8ou4YuGFTibzUeYocULuzNO82zZPF2IIAtGNgYrRjoBvtGOhGOwZGT8p3DKSKn0f29ttvP9MO37x5cxPEGUqYYE1NAVUPzC9QVnPA+PHjzXgDDAjkh9oAMqqzzz7b/M5ll1223TouVAKST9QS8NuyAjm0QbZc8s4Amb7EXeOgEpAYlQA3KgFuVAKiJy0eEeRG6dmzp9xzzz1y6aWXmo58P/zwQ8rcOCoBySdKCdjsSeFXg+bI0Y27SdNOU2LvJkYlIDEqAW5UAtyoBERPRo4TEDYqAcknGc0BfaflOfsCWFQCEqMS4EYlwI1KQPSkrATw2AyZK1X8rVq1Ckw//vij6dRQ2agEJJ+oJWBHUAlIjEqAG5UANyoB0ZOyEsB4ADx6xSAadv4Af+I9xvgv79wBUaISkHyikoCclYXSbVKuLF5V/kxZJSAxKgFuVALcqARET8pKAI9dUQtApz1K+2T4BEUCLa95nI91UiFzUQlIPlFJQNPOk+WIRt3kp5HlmzcCVAISoxLgRiXAjUpA9KRNnwAGzvnkk0/MwBrAY4E8zqcSkBwyQQLmLV8nZ7zWW45q3E2mLi7/GAQqAYlRCXCjEuBGJSB6Ul4CqAkgg+X5fr6UZgLg4njqqaeMHFQ2KgHJJwoJeClrilSrny112kyIvVM+VAISoxLgRiXAjUpA9KS8BBAImTuAsYi//vprMzwwcGEwh8Dw4cPN68pEJSD5hC0Bc5avkwvf6i/VG2bLb0sLYu+WD5WAxKgEuFEJcKMSED0pLwEM4VutWjUzKBCBEQmgdoCb59xzzw1943cGlYDkE7YENOs6TQ6ply312k6Uks1lzxzpRyUgMSoBblQC3KgERE/KSwCBlVkCeVKA/gC2JoBJJc4//3wzLXBloxKQfMKUgKVriuSGDwd7EtBFZuTu+PTEKgGJUQlwoxLgRiUgelJeAsg8XnzxRbn++uvNkMEXXnihGT3w8ssvl4cffthkwJWNSkDyCbsmYPbSAsmauEg2FJc9E2U8KgGJUQlwoxLgRiUgelJeAoBHBZnch/kCDjzwQJPuu+8+84RAKqASkHyi6Bi4s6gEJEYlwI1KgBuVgOhJaQlo37696fjHXOQ2E2FaSfoJpBIqAcknDAnYskVkeu4ambxotRRv2vmpblUCEqMS4EYlwI1KQPSktARccsklcswxx5iq/8cff9w8HcBF4ZeCVEAlIPmEIQE5Kwrl+g8Hy2XvDjBjBOwsKgGJUQlwoxLgRiUgelJaApYuXWoy+zfeeEPuvPNOOeuss0y69tprpXbt2vLFF19on4AksatJALUAPw6fL/s810meaj1GVqwrii3ZcVQCEqMS4EYlwI1KQPSkRZ8AC5k+nQIffPBB+ctf/iJ/+MMfZOjQobGllYdKQPKpqATkrl4vt3823AwO1GtqXuzdnUMlIDEqAW5UAtyoBERPWkgAowK2adNGGjRoIHfd
dZeRAJ4MaNy4seTm5sbWqjxUApJPRSWgw7hFslfNjvJEq7GyZsPG2Ls7h0pAYlQC3KgEuFEJiJ6UlgCaAXg08KabbpInn3xSmjVrJj///LMp/S9cuNA8NZAKqAQkn4pIwNK1G+TOz4dL9YZdpdP4RbF3dx6VgMSoBLhRCXCjEhA9KS0BPAr417/+VR566CHJysoy0wqnIioByaciEjBg5lL5xzMd5IFvRlW4FgBUAhKjEuBGJcCNSkD0pLQEEHhat24ttWrVMhMIHXnkkXLHHXfIZ599JjNmzDDDB6cCKgHJpyISsK5ok/SYssSTgWWxdyqGSkBiVALcqAS4UQmInpTvE0CgLy4ulsLCQpk5c6Z8/vnnRgSqVq0qVapUMcMHVzYqAcmnon0CICyHVAlIjEqAG5UANyoB0ZMWEsANkp+fb0r/X331ldx///2mVmCvvfZKicCkEpB8dkYCNpVslimLVktBUbgBSSUgMSoBblQC3KgERE9KS8DUqVPlm2++kccee0zOPvtsOeCAA+TYY4+Vu+++28gAgYmbqLJRCUg+OyMBQ35bLgfV7SJNOoZbe6QSkBiVADcqAW5UAqInpSXgzDPPlOOPP15uvfVWadmypYwdOzYlgn48KgHJZ0clYFPJFrnri+FyeKNu8uWgObF3w0ElIDEqAW5UAtyoBERPSkvA6NGjpaCgIPYqdVEJSD47KgFDZ+ebqYKveG+grF5f8ScC/KgEJEYlwI1KgBuVgOhJ+T4B6YBKQPLZEQmgX8ndXw6Xwxp0la8Hz429Gx4qAYlRCXCjEuBGJSB6VAJCQCUg+eyIBAyatUyOfKGbXPhWv9BrAUAlIDEqAW5UAtyoBESPSkAIqAQknx2RgBrfjZL9a3WWr4aE2xfAohKQGJUANyoBblQCokclIARUApLPjkjAhJxV0rLPTFmzoTj2TrioBCRGJcCNSoAblYDoUQkIAZWA5LOjHQOLN0X3VIlKQGJUAtyoBLhRCYgelYAQUAlIPuWRgMWrNsiaCPoAxKMSkBiVADcqAW5UAqJHJSAEVAKST1kSsHnLFqn5yzg5+eUeMnb+yti70aASkBiVADcqAW5UAqJHJSAEVAKST1kSMPS35XL2696F+FpvWbwq2gxWJSAxKgFuVALcqAREj0pACKgEJB+XBGws2SwvdJgke9bsIJ/1nyPF3usoUQlIjEqAG5UANyoB0aMSEAIqAcnHJQEj5+bLOa/3kTOa9ZbZy6IfcVIlIDEqAW5UAtyoBESPSkAIqAQkn0QSsH5jibyaPVX++XQHeb/PLCmK8KkAi0pAYlQC3KgEuFEJiJ6MkwAypKZNm0r9+vXNDIUEcJg9e7a0aNFC6tatK88//7xJn3/+uRlytixUApJPIgmYmrtGzn2jr5z2Si+ZvChxn4EwUQlIjEqAG5UANyoB0ZNREtCsWTO5+uqr5emnn5batWvLiy++KDk5OWZZ586dpXr16maa4iZNmkjDhg2NJKgEbCVdJGBZQZF8NnC2tBuTk5RaAFAJSIxKgBuVADcqAdGTMRLQr18/Of3002XIkCGxd8TceEVFRebvrKwsueSSS2TatGnm9Y6wdu1alYAk4+oTACWby5a3sFAJSIxKgBuVADcqAdGTMRLwxBNPmFJ+48aN5dJLL5WHHnpIRo0aFVsq0rVrVznqqKPkpJNOMrUFr7zyignuiaCGwKZVq1YZCbDTHvuX7UwqKSkxGScSELQ82QlmzZplpnaGoHWSnZAAjvtWtkjRxhJZtGKdbPKOnSXoc2EnyM/PL5WAoHUyORUXF5trGRkIWp7piXsd4S8sLAxcnskJkIDc3Fzzd9A6miqWYOnSpaUSELTOjiYgf045Cbjmmmvk73//uzzyyCPSvn17qVOnjpxwwgmlpdsJEybIBx98IO3atZMff/xRzjrrLLn++uvNTRrPyJEj5YILLpCqVauadOCBB8ruu+9umhSw+jBSmN9V0dSzZ08jSV26dDF/B62T7MTxIfPk7149e0irzj3l/Oa95N6Pekm77K3vxX8mqkSQS6XzlWpJj407+a9lTb8n8hpqaLm/gpZrCidxfDnOQct2JlELQNP72WefHYuYwSRdAi6//HKpVq1aqVUuX75cbr75ZtMBEKiWs1WW/E3191577SUDBw407/mhCWHRokUyb948kyZNmiR77723qVbB6GkWqEiiRoETs3LlysDlyU5UVU6dOlVGjBhh/g5aJ9mJDCIvL88c7zUF6+SDXtNkz5od5dmfxnjnp1DWFwZ/LuzE7y9evNiYbxjnfldLNNlwLdNvJmh5pid7r1ObFLQ8kxN5zYABA0weq/dWNInjunDhQlOTGdYxJj5mZ2enXk3AnXfeKVdddVVplT0bigDceuut5nUQhx9+uOkcWBbaJyD5+PsELFu7Qc5stnVcgEGzlpv3kon2CUiM9glwo30C3GifgOjJmD4BH3/8san+J5gBJflrr71WGjVqZF4jBzaIc2OOHz9e9thjD9OhsCz06YDk45eAb4bMlf1rZcnjP1bO9qkEJEYlwI1KgBuVgOjJGAkgo77sssvkjjvukC+//NI8JnjeeefJzJkzzfJWrVrJs88+a0r+H374oZxzzjmmlqA8mZdKQPJBAtZ4ErBiXbGc/XpfOeXlnjJo5rLY0uSiEpAYlQA3KgFuVAKiJ2MkAGj7qFevnlx33XXy3HPPyZQpU2JLxLT916hRw9QO0HRAJ8Hy3pgqAcnHSMCa1dJ2TI7s81wnefDbkbElyUclIDEqAW5UAtyoBERPRklAVKgEJB8rAUN+Wy6NOkyqlL4AFpWAxKgEuFEJcKMSED0qASGgEpB8+vftLesLfh/HYXMSBweKRyUgMSoBblQC3KgERI9KQAioBCSXtRs2yqftesngaVuHfK5sVAISoxLgRiXAjUpA9KgEhIBKQHJYvX6jfNh3ltz40RA5/aWucv4bfeSxH0bL9Nytk0BVFioBiVEJcKMS4EYlIHpUAkJAJSB61ngC0LjjJKlWL1uq1M6Sg+pmS9U6XaRKnSy5/N0BZvbAykIlIDEqAW5UAtyoBESPSkAIqARET9bExVK94dbAX61+dmk6xEsH1c2SR77fOq9BZaASkBiVADcqAW5UAqJHJSAEVAKiZX1xiTTpNEX+9VynbQTApqp1u8h5b/SVucvDOf47ikpAYlQC3KgEuFEJiB6VgBBQCYiWVYXF8uzP42Tf5zsHSsBBngQwbPC4BStjn0guKgGJUQlwoxLgRiUgelQCQkAlIFqKNm6W17KnmUGBgiSAmoAL3uwni1dVTkaqEpAYlQA3KgFuVAKiRyUgBFQCoqf/jKVyfNPuZo4AvwAcXK+LVK2TJQ3aT4qtmXxUAhKjEuBGJcCNSkD0qASEgEpAcvh0wG9yfJPusu9zneSAOtmyX63ORgIe/Gak5KwqjK2VfFQCEqMS4EYlwI1KQPSoBISASkC0jF2wUrInLjZ9A/i3TpsJct1b3eThb0d4YjBbcldX7nFSCUiMSoAblQA3KgHRoxIQAioB0UE7/9UtB8k5zfvI+IVbO/5t3LR
Z2mb3koVL883rykYlIDEqAW5UAtyoBESPSkAIqAREQ7EX7F/Jmmo6BD7RaqwsKyiKLRHp16e3FKyt3JECLSoBiVEJcKMS4EYlIHpUAkJAJSAa+k5bah7/u+jtfjItbkRAZhFcvXp17FXlohKQGJUANyoBblQCokclIARUAsJn+doiueCtfnLUC93k+2HzYu/+jkpAeqAS4EYlwI1KQPSoBISASkC4bNkiUrftRKlaN0se+2GMbCrZfppglYD0QCXAjUqAG5WA6FEJCAGVgHAh6DfqOEkueru/zMsPPqYqAemBSoAblQA3KgHRoxIQAioB0bB0TeL9VwlID1QC3KgEuFEJiB6VgBBQCQiHok2bZb5X8t9Ysjn2TmJUAtIDlQA3KgFuVAKiRyUgBFQCwuGnkQvkkncGSM+pZd/0KgHpgUqAG5UANyoB0aMSEAIqARVnZt4aufDtfmYyoPZjc0znQBcqAemBSoAblQA3KgHRoxIQAioBFWPtho1S8+dxss+znaRJpymyuSwD8FAJSA9UAtyoBLhRCYgelYAQUAmoGD8Ony+H1s+Wq1oOKvd0wCoB6YFKgBuVADcqAdGjEhACKgE7D5MDXfh2fzMtcO9peVJ2HcBWVALSA5UANyoBblQCokclIARUAnaeLwfNkX8910madZ1WrqcCLCoB6YFKgBuVADcqAdGjEhACKgE7z9qiTTJw5lLJWbljmaBKQHqgEuBGJcCNSkD0qASEgErAjkPnv3L0/0uISkB6oBLgRiXAjUpA9KgEhIBKwI6xsrBY3u4xQzqNWyQlm3fOBFQC0gOVADcqAW5UAqJHJSAEVAJ2jF9GL5C9nu0ot306TBaV82mAeFQC0gOVADcqAW5UAqJHJSAEVALKz4y8tXLyyz3lpJd6SvbExbF3dxyVgPRAJcCNSoAblYDoUQkIAZWA8rF+Y4k8+O0oObBOlrycNTX27s6hEpAeqAS4UQlwoxIQPSoBIaASUD6+GDhHqtTKkhs/GiJLVlds31QC0gOVADcqAW5UAqJHJSAEVALKZuGKQjmmSXc5tnEPGThrWezdnUclID1QCXCjEuBGJSB6VAJCQCWgbFasK5ZPB8yWj/r9FnunYqgEpAcqAW5UAtyoBERPRkkAGdJ7770nl112mZx33nlSq1YtcwAsEydOlIceekjOOeccefzxxyU3Nze2xI1KQPlgbAD6BYSBSkB6oBLgRiXAjUpA9GSUBNx3331y4403ynfffScdOnSQH374oTTQT58+XS699FIjAW3atJGbbrpJLr74YikuLjbLXagEJGbE3HyZlLPKE4DYGyGhEpAeqAS4UQlwoxIQPRkjAT/99JOce+65Mnv2bCkp2Voa5V9uQnjnnXdM0J85c6Z5vXTpUtlvv/2kc+fO5rWLtWvXqgQEsKpwo9z5xXCp3qirjJ63IvZuOKgEpAcqAW5UAtyoBERPxkjA/fffL3fccYepDTj66KPlkksukbZt25ZKQI0aNeSJJ57YJrO6+uqrpUGDBrFXv1NUVGRqEBYuXGjSlClTZO+995bly5ebm7mwsLBCqaCgwGScq1atClye7ISMTJ06VUaMGGH+DlrHn9Z7qXhjkbyePUWq1u0it340SBbnr5YNIRwbm3r27Cl5eXmhHO+KJH5/8eLFRkoqe1tSMVFLxrWMKActz/TEvY7wI5JByzM5kdcMGDBA5s+fr/dWRInjmpOTYwoxYR1jas+zs7NTTwKuuuoq+eMf/yhNmjSRSZMmyQcffCCHHXaYMRYC/8033yyNGjWKrb2VBx54wDQPxEMwPP/88+XAAw806YADDpDdd99dOnXqZKw+jEQNBJlD0LLKSJzULl26BC6LT3179ZA3f8iW4xtlybFNussX7XpKr57B6+5s4vgQXIKWJTuxHWxP0LJMT1zDqXYtp1rS45M4ZWVlSdeuXQOXaQonhZ1/USBq1qyZnH322bGIGUzSJeCKK66Q448/3pg3YCwE+ccee8y8vvXWW6V+/frmb8u9994rDz/8cOzVtlCDYNPKlStNc4D97i1mcpydTzRTcGIws6DlyU4wa9YsGT16tPk7aB2bYMnq9XLv1yNl72c7mamCizdtbX4JWn9nExcaNSVhf++OJsjPzy9tDghaJ5MTpQKuZZoFgpZneuJeRwBoSgxanskJaA6w/baC1tFUsQQ0fdvmgKB1djQB+XPK1QRQor/yyitNYAUyp2effdY0EcCTTz5ppMDfEZA+BK+99lrsVWK0T8DvbNi4WVr0mil7PNNBHvlhtCxbWxRbEi7aJyA90D4BbrjXKT3ZfEnZFu0TED0Z0yegXbt2Ur16ddPGxI03YcIEU13x7rvvmuU8KXDaaadJr169jJ1zUPbcc08ZNWqUWe5Cnw74HToDNu82Xa5pOUhGzs2PvRs+KgHpgUqAG5UANyoB0ZMxEsBN9vTTT5snACjx33DDDaaTIJ3LgAPB8osuusg0A5x55pmBnQKDUAnYljXrN8rspQVSvGlrp8soUAlID1QC3KgEuFEJiJ6MkQAgWDMGQIsWLcwjg1YALPTu//nnn83jgjw5UN4grBLgbfOWLWZo4GShEpAeqAS4UQlwoxIQPRklAVGhEiAybHa+XP7uAPlqyNzYO9GiEpAeqAS4UQlwoxIQPSoBIZDpErCqsFiu8ATg0AbZ8tnAObF3o0UlID1QCXCjEuBGJSB6VAJCINMloGlnBgXKkge/GSnri8OZG6AsVALSA5UANyoBblQCokclIAQyWQL6zVgqhzfsKmc06y0z89bG3o0elYD0QCXAjUqAG5WA6FEJCIFMlYDla4vk32/1MxLwzZB5sXeTg0pAeqAS4EYlwI1KQPSoBIRApkrA5wPmyHFNustD346SLd5/yUQlID1QCXCjEuBGJSB6VAJCIFMlYMOmEvl0wGxZuCL5GZhKQHqgEuBGJcCNSkD0qASEQKZJwKaS6AYBKi8qAemBSoAblQA3KgHRoxIQApkiAePHjZX1GzfLF4PmSJsxC72/k/MkQBAqAemBSoAblQA3KgHRoxIQApkgAfPnzJbpk8dLx/G5cki9bLnlk6GyYt3vky0lG5WA9EAlwI1KgBuVgOhRCQiBTJCAoRNmyHtt+8uVLQdLtfrZ0nHcItlUktzOgH5UAtIDlQA3KgFuVAKiRyUgBHZlCRg0c5np/X/Rm73lxMZdZP/aWfLf70bL6vUbY2tUDioB6YFKgBuVADcqAdGjEhACu6IEUMbvMy1PTn25lxxYJ8tLXeTgetle6iLHNO4urUbM15qAGCoBiVEJcKMS4EYlIHpUAkJgV5QAHv+76O3+UrVuF1P9709VPSk47ZVesiCJswbGoxKQHqgEuFEJcKMSED0qASGwK0rA0NnLtwv+/rR/rc7SbkyObK6kygCVgPRAJcCNSoAblYDoUQkIgV1RArpMXBwY/G2ib8DnA2bLpkqyAJWA9EAlwI1KgBuVgOhRCQiBXVECek5ZEhj8baImIHtirtYEeKgEJEYlwI1KgBuVgOhRCQiBXU0CFuQXyr1fjpADvNJ+FS9tJwDPd5aL3+
4veWsqT1JUAtIDlQA3KgFuVAKiRyUgBHYlCZi1tEDu+Hy47OsF+ls+HSrXvD9IDqiVJft5af/aXWQ/7/1zXu8jPacu8bZTnw4AlYDEqAS4UQlwoxIQPSoBIbCrSMCUxWvk9s+GyT7PdZL/fjdKclaul+lL1kjzbtPlzk8GyM0te0vTzlNlxJz82CcqD5WA9EAlwI1KgBuVgOhRCQiBXUECZuatlds/HyZ7PtNRnmg1Rpas3jZTGj15hvQZMir2qvJRCUgPVALcqAS4UQmIHpWAENgVJID2/fu/GSXP/jzeE4Dtf3funNkyfuy2UwlXJioB6YFKgBuVADcqAdGjEhACu0pzwKKV62VVYfBwwP6phFMBlYD0QCXAjUqAG5WA6FEJCIF0lYAJC1dJvXYTZPLisoOpSkBiVAISoxLgRiXAjUpA9KgEhEA6SsD03LVyeYsB5nn/d3rOiL2bGJWAxKgEJEYlwI1KgBuVgOhRCQiBdJOAGXlr5ZJ3+psxAJ79eZysLCyOLUmMSkBiVAISoxLgRiXAjUpA9KgEhEA6ScC03DVy4Vv9zMRANT0BWLOhfJmzSkBiVAISoxLgRiXAjUpA9KgEhEC6SAAjAZ7TvK8c5AnAc7+Ml3VFJbElZaMSkBiVgMSoBLhRCXCjEhA9KgEhkC4SMGb+SrmsxUCp1cYTgOIdy5RVAhKjEpAYlQA3KgFuVAKiRyUgBNKpOWB67podFgBQCUiMSkBiVALcqAS4UQmIHpWAEEhlCeg1dYm812umLK3gZD8qAYlRCUiMSoAblQA3KgHRoxIQAqkqAQNmLJXz3+hrngIYOnt57N2dQyUgMSoBiVEJcKMS4EYlIHpUAkIgFSWgnycA5zbva8YBeKP7dFmxruzHAF2oBCRGJSAxKgFuVALcqAREj0pACKSaBPT3BOCM1/qYGoC3e86Q9RvL/xRAIlQCEqMSkBiVADcqAW5UAqJHJSAEUkkC+k9fKqe+3FMOrtvFjARYtGlzbEnFUAlIjEpAYlQC3KgEuFEJiJ6MkQButqKiInOz2VRcvLWK3L+ssLBwm2XlIZUkoNukXDm+aXfTBLBp85bYuxVHJSAxKgGJUQlwoxLgRiUgejJGAn755RfZb7/9ZJ999pF9993XpAcffNAs69mzp1SpUkX22msvsw7LHnnkESMG5SEVJGDzlq0Bf8PGElm4onCnHgN0oRKQGJWAxKgEuFEJcKMSED0ZIwGtW7eW0047TXJyckzGxE1nb7wuXbrIhRdeaC64LV4wtcv4uzxUtgTwFMAvoxYYAYgKlYDEqAQkRiXAjUqAG5WA6MkYCWjVqpUcddRR0r59exk8eLDMnTs3tmSrBCAIH3zwgQwcOFBmz54dW1I+qDFAAsK8kZGAspokkBSq/49p0l1OfqW3/LYsHAkJgmMyduzY2KvKBwlYu3Zt7FXlsmrVKpWABJSUlBgJINgpwSAB5a11zDTIq/Py8mKvlChYvnx56BLQr1+/1JOATp06yWWXXWbSJZdcIpdeeqlkZ2ebZQR+3rv44ovN+xdddJF07NgxYU3A4sWL5bvvvpOWLVua1Lx5c9ljjz1k8uTJMmfOHBMwK5IodbNtM2bMCFw+d85smT9/vvw8dJYc27i7HFK7ozz/dW+ZMfM3mROwfkUT+zRixAhzoYSxf2EkAsuUKVMqfXv4/UmTJhlpS5Vjk0pp5syZ5lqeNWtW4PJMT/Zenz59euDyTE7cT7169ZJx48bpvRVR4rhOmDDBiGhYx5jY9P3338uZZ54Zi5jBJF0CFi1aZIIqVcjz5s2T2rVrm5oBAjomREZOiY716tatK9WrVzd/B8H38Pm7777bpNtuu03+/ve/G2ulyryiafTo0ZKVlSUjR47cbtm4sVv/bfptN6lWu70c0aibvJk1XiZOGL/dumEmBIAbMmhZZSQyzmHDhgUuS3YaOnSo2Z6gZZmeRo0aZa5l/g1anumJe52aSCQ7aHmmJ2Q/rHxVU3AaMmSIdO3aNXDZzqTx48fLRx99lHoSEM/SpUvl2GOPlTZt2sTe+R2q5uggyIEpD1TbR9EcQHtqEK1H5cjxL/U2AvD14Lnl7rtQETA8bQ4IRpsDEqPNAWWjzQGJQQDIq5XoyM/Pz4zmgHhoZzryyCMDJaCgoMBIAKW78kAwSlbHQJ4CeKv7DDnhxZ7y3dB5pU8FRI12DEyMdgxMjHYMdKMdA91ox8DoyZiOgfQJoHqdnvy0U95///1y3HHHSW5urqmKw1wI5rRn1KhRwzQHlPfiS/bTAfkFxTJ+4UopDmkgoPKgEpAYlYDEqAS4UQlwoxIQPRkjAa+88oqcdNJJJrjz76233loa1H744QcjBIcffriccMIJctNNN5n25vIStgRs2bJZevboLkVFWyWguGSzdJmYa54EqCxUAhKjEpAYlQA3KgFuVAKiJ2MkgADNxcQ4AZT+/e3J3IAsoyMgywjqO0LYEjApZ6U0/7GbjJidZ0r7rUcskGr1suWmj4fIrLzKaQdXCUiMSkBiVALcqAS4UQmInoyRgCgJSwImL1old34+3HT4O6xBthz5Qjc549VeUr1htpz8ck9PBuYnrQ9APCoBiVEJSIxKgBuVADcqAdGjEhACYUgAJfxr3h8kB9TOkmr1s0vTwfWypXqDrtKs67TYmpWDSkBiVAISoxLgRiXAjUpA9KgEhEBFJYDSffNu08zUv4d4Qd8vAYd4qWqdLvJU63GyqaRyagFAJSAxKgGJUQlwoxLgRiUgelQCQqCiEpBfUCQPfTNK9n2+8zYCYBO1A1e3HCQL8wtjn0g+KgGJUQlIjEqAG5UANyoB0aMSEAIVlYC8NRvkvq9GyH6JJKBOllzx7kCZvawg9onkoxKQGJWAxKgEuFEJcKMSED0qASFQUQngCYD67SY6awLu+nKErCuqvIxUJSAxKgGJUQlwoxLgRiUgelQCQiCMjoGDZi2T01/tJfvX2lYEDqzTRY5q1E3ajcmJrVk5qAQkRiUgMSoBblQC3KgERI9KQAiEIQEbSzZLh3E5cs7rfUyNwAF1ss2/J73YQ74aPEc2bCyJrVk5qAQkRiUgMSoBblQC3KgERI9KQAiEIQFQsnmLafdnPIAnP+0mXw/8TablrjEjBlY2KgGJUQlIjEqAG5UANyoB0aMSEAJhSYCFmde6dO0uhSmUMagEJEYlIDEqAW5UAtyoBESPSkAIhC0BW8gYuneXogQTCFUGKgGJUQlIjEqAG5UANyoB0aMSEAJhSwAZg2sWwcpAJSAxKgGJUQlwoxLgRiUgelQCQkAlIPmoBKQHKgFuVALcqAREj0pACFgJKCwMZ0S/LVu2GAkoKiqKvVP5zJ49O+UkgOOeCqxcuVIlIAEEfySAfi7K9nCvIwGpJPypBBKQl5cXe6VEwfLly1UCKoqVgDAzulQLKgsXLpTJkyfHXlU+AwcOTBlJQv6GDBkSe6XEQ4agJKZfv35GBpTtGTFihKxatSr2SokCptUfNmxY7FU4IG8ZJQFUp/zhD3+QunXrSuPGjeWFF16oUGrUqJE89NBD0qBBg8DlyU7sU
82aNeWJJ54IZf/CSP/973+lXr16KbE9nPeHH344cFmmp4YNG5prmX+Dlmd64l6vUaOG1K9fP3B5Jifu7UceeURq1aoVuFxTOKl27drmOAct25nUtGlTuf322+WEE05wyu0uVxNAwCZQhpGeeuopIxUEuqDlyU7chGeffbYcdthh5u+gdZKZnn32Wfnzn/8sd9xxh/k7aJ1kJX7/lltukd13312ee+65wHUyOT366KPmWkYgg5Znenr66afN8UGUgpZnciKv2WuvveSaa67ReyuixHG98cYb5R//+Ic8//zzgevsaHrmmWfM937yySemz0sidikJiIL9998/ZTq+wQcffCAPPvhg7FXlc9RRR8mMGTNiryqXcePGyYknnhh7pfihyWbffffV6m4HBx54oOTn58deKX4uuOAC6dmzZ+yVEgVDhw6VM844I/YqeagEOKAz1X777SdLly6NvVP5tGjRQu67777Yq8rnyCOPlEmTJsVeVS60W1L1pWyP7S+jvd+DoaRUpUoVWbx4cewdxc95550nXbp0ib1SooBOgaeffnrsVfJQCXDAY1V77713SvWKffvtt+Xuu++Ovap8aJqYMGFC7FXlQqeaY445JvZK8UNt1p577hnakzO7GnQmpqZk0aJFsXcUPzRDZmVlxV4pUUAn9FNOOSX2KnmoBDggY2jSpIkUFBTE3ql8BgwYIK1atYq9qnyaNWuWMqWn+fPny5tvvhl7pfihBoAOXsXFxbF3FD/UBNCRKpWa/lIJmiGnTp0ae6VEwaxZs0xNb7JRCSiDVBtchcwqlZ715vikSjsz26HPwSdGBwpyo8cnMdxX2p8kWior/1IJUBRFUZQMRSVAURRFUTIUlQBFURRFyVBUAhJAL+H27dvL559/Lr/++qvMmzcvtiR6GD6SZ0Zbt25tOgEGdbyjEwnLPvvsM7N9UQ3pSR8EHgHkN7744gvz74IFC2JLt4U2rezsbOncuXNk8wmwn/Si/fbbb832dOzY0Yy5DUwcxTCZ33//vVnGWPAcy0yF49K2bVsztLNt7+aJF47fV199ZY4hYyu4BhLZFeEa6tSpk7m3OQYzZ87crr2b49WrVy+zHhNTZRKjR482x+XLL78010r8Y6WMC/LDDz/I119/nVLzmKQqtPMz8Rt5FceVR5m5D/1MnDjRLCNNmTIl9u7v8AQWy7777rvQh41XCQiAIMez+FdddZXcddddZqSsG264wUzekwzGjx8vDzzwgPl9RjEjqPrhxmR0PEaYYrQ+0vTp02NLw4XAevPNN5t05513yhVXXCH/+c9/AscGGDx4sBlwheeto+pJzM3C6G4Mh8mjkpdeeqncf//9ZvKg4cOHy+WXXy633Xab2VYGOGH4zEzLxIGg9tprr5kRHblOOI8EtpYtW8q5555rjh3vX3zxxeb6ypROX8yE9+STT8rVV19triGu5TZt2myXKSPhDBR2+OGHy8iRI2Pv7togg4jz+eefb+4hjg/jA3DN2JlZp02bJpdcconJE0n//ve/dfyAMuDpsvfee0+uvfZa85gu+bX/KRSki3vy+uuvN/kreRh5qYUnwjjOfJ500UUXGZEIC5WAAD7++GMz6YK13Dlz5pjnN5s3b25eRw2DE3FhMFkQw3V27do1tkRMjQQZ14svvlg6cQ8XVFTPf/NIGQZLzQiBAqMluDI8sz/jZCwFMtaXX37ZZJxRjSJIKY5HAW3phEyJY8Q2Im+U3ij9k6FRk3PEEUdk5MQ57dq1M/MokKEwFC6ZOMfloIMOkg8//NAcH17fe++9cuuttxqJ2tXhmuGRVuSaewsopbHv/toQrmUC4OOPP26CIPdiJsAMijfddJMpAHGskMY33njDHANb+rznnnvMfc4x4l6sU6eOCWA60mJiyEMp3ZMnMjcABUt/TSkyRaGF65Djyj1JHo88cH0S9LkWKcwwP449B2GhEhAAVS4EfVuapfTAcLS8n2wYrMgvAZRauOneffddk8kzFnyySyrMpfDYY49tIwEEmtdff91UWx188MFJG0oYIcCuqR2JJycnR0466aSMG+SEDIeSHMeESUmoKUECyFQY5pmSnYUMhesovsp3VwSRtbVDSCzBjia3+FoA5qFgzAA7A1umSACFCq4HSqoW7mkCDvcZksC9TROJZdSoUXLqqacG3n/K9jAPAwHfSgCBfZ999tlm9lMKNOecc44RL2qfq1WrZprtLNRSVa1aNWGz7I6iEhAAJSRK/YgAmQBD0ZIpVMZoa/ESQC3F3/72N9M88f7775sZ/BgljwsjGVA7wk1Pu7uFfglULXN8xo4dG7kE8DtMjsEQm8cdd5wZYCNoEBxb9U1tQabAtWuDGMGNvwl2CAClXabL5Zhw7BBbMiT6l2QC1Oj99a9/NftOTdo777xj7m8GmCLAwU8//WSqZMmkqYZl3UyRAGr6uBZoaiTvO+2000xzEcPZcu3MnTvXjKrob5OmoERJ9ccff4y9o7hgQh+/BFBoohCTm5trXgPX3YUXXmj+5diTx3HtWhCDQw891EhqGKgEBECV95VXXmlO2C+//GJKDmQGYbbDlJd4CSDgkZFRI0B1HQGRUh8l8agHmiAToA2eAGyFiP4LDCnKMQPa7A855JBtLtqwYT9p/6eTIu27Z555ptk2P7RzE+TI1G2nuEzgm2++Ka1aBCSA/iVIEseBKXMvu+wyUwL+6KOPjBAwGpy/OnxXhWuUPjZcM1y/HA+aRih1cb1yDRH4bP8aOlQiCakyLHbUcA2Q3zGJDWJPYCcf5BpCLjl+SIC//xGCjShURi1pOhIvAQgmzZn++WkI7kgAwt67d29Tm+nvj8Z5oMmV5WGgEhAAUwgzU5/tdU6GQbU77YTJJl4C6PXOBeDvmEeJho4j8dWaYULVOm1XHBf/MMoEEKa/pDMgiUlq/vSnP5kqLAJw1FCFyXZRI2JhtjNunE8//TRyMUolqPInk2E6ZdtBE2G0r8lwqEa0JX8yfaYZpd9AVB1LUwmCPPvPdWFhvonjjz/elG4Ro7/85S+lx457b7fddjPVtQjnrg73NbWeBH/bUZSSKJJNwOH6OuCAA7YpgVIIQCT9TQRKYuIlgGYWJMDfkbp79+4mP6eZl5pV8nueYLGwLtcxBa4wUAkIgJIT7ah0fAECDaVfqryTTbwEEOCojrc9cglydDaht3dUJV4eUaT5gQ4t8W3HdFRBSMgMyEh5bJFe1VE9Jsg++mWHv+noxRMDtrqbGgDkJNNg/zlXtB9yPmiSIcOhRzHBjtoTJI2Mx4JU0ts76LGkXQ2uVar6aZe1NR8EvLPOOstkslyvZKwcOzJajs2xxx5rrmm/+O6qUNqnKY/aAAtt1dSG8NQAUOvHfCocP0SBdRGHTGlSqijxEgBU9/P0AMeT48okcUgAfdEogHIN8nggy8nvaQYmjwur0KcSEADWTzv7K6+8Yjq80AmPKXPJMJIBxk2w79Chg/zv//6v6cRE5zaqLLl46JhHlS6Blj4CtA9F1QOei5AgUb16dWOo9D2gJECmGXQRUtpEAqLqE8A28OgbmRLH
iBoAmh8IcAQ/fpuetVg0j9mwrak0FXSyoQYLebNPB1DVSw9wnqKwVb9kTCzf1SETJaBzLzNGAB2wCGpMrBTUMZJ7iholOr9lAjQZUaAg6PN0Cfca18p1111XOk7Kzz//bPICmgsYK4Drh/xJSQyBnWYTjicFFqSTWIKYU6jhWPIUE8eTWiquOX/nXTpnnnzyyabJhbEZkAZEICxUAhJABsENQF8AbgK/HUeNrXrH9rjJuACokrOd8TDEhg0bmm2jzc5fUxA2SAftpPwWxm8TN76tKfFDtTLHy1/aDBP6ZRDUyKhoy6UvhO24Re0I71NT4t/W+HEWMglKFXSCs304EEkeNyIjohqXDCaTxlFAXBEBnhJAADg+yFEQXFfUCGZSx1IeN+Z6YawA8hxqQOPHRyEfos2aa4jjZx9VVoLhmqPZjbyIvJRE3s4TVraGyfbPIVEDFd+MSdBnGeeF7woTlQBFURRFyVBUAhRFURQlQ1EJUBRFUZQMRSVAURRFUTIUlQBFURRFyVBUAhRFURQlQ1EJUBRFUZQMRSVAUcqAwT4YTIbERDM8F82/9j3XSI08I7yjQxczmE3QMKz8Tvx2+F/v6O8w8AtjYcQ/Bx4Ez4+zLs8wRwWD+TBgDftC4m87sl9F4dgETTKlKJmOSoCilAEDFDFxColhnP/f//t/Zuav/fbbz7zHiJJBENQY0OjLL7+MvVM+GPWwbdu2sVe/w8Asdjv4fca1ZxhgXjNF8I4OioREENzLE2hZh3UJzlGA4DA0KnPXMwIko9IxxG/79u1ja1QMRmVj0C1FUbZFJUBRyoAARQCkxM1wxIz8xYiSBERG/GIER0aVYw4F5qwn+AOjJjIyGLNQMh69nWmRz/A36/M5vtsPk7QESYAN2oz+x5DSTHTDnOKM6rhw4UIzfjvL+V5+m1qIvLy80m1jAh1/rQXr8jlbQmY5o0DyHuszVKxdn5I079vZCe0+sC18P3MPxI88yDoMH82cEgzdzHFie4LgeB522GFmNDqmVeW7GaWT9y1sC/vF+P6MTOkf6Y/t5liwfWwLy+1QyGwHI2xSw8J5YHvtZ9l+jhvfyW/aobCRHraVuRiYSIzj4V9u4djzW/bz/uGXOR7MQcBnGQXUD8ee7eBzHCO2UVEqA5UARdkByLgZ25ux94FJaZivn2DPRB9MtcycBvDyyy+XzuDHTHXMwEggI7Cxnh0+lPH9/UEikQT44fcPOuigUoFgzHFqBRjyFUlp2rSpCdrMrcDwrpSCGSaXMfOtpFC1z3t2BkGC5L333msmo+I7GH7Zzp5HcGVdZqwEJITZI9944w0zhCw1EXyWQA8E2fr168vRRx9tvqdGjRpmUpRHH33ULI+HdRkqO1GVPcGX8daZM4Pjzz4xpK2VEvafGgS+h+FVmU8D+ULcmImNWdeYIZDjTQ0D8wFw7Nh+9ovjQ60NU3Vzjth+xtFn2xkim385PkiKFQGE7+GHHzaf5Xgxx4a9LhAq5vhguG0+y0yNdtIvxKJmzZpmW5i3n9/Pzs4OrelDUXYElQBF2QEo2RGEbKBn8h0yekqflHbJ3AlClCLJ1Jn7gQBlM3hK1LbUCLTHMx548+bNS0vd5ZEAfh8JsPM3MLHIn//8ZzPpFSVQginbY4Md0M+AQGlnfKP6nbng7TSlSAnTljJzHN/LxDrIC6VlSrysaycuIaD985//lOeff17y8/PN73Ac3nrrLbOc7SEw2tkkkQ/WZ276IJggi9/iN9lOAre/dMxkPsyTYftKEIAvuugiM/cBMLkKQZ7f53OIFs01dvKfDz/80ARdsBLEvtx8880yYcIE83rMmDFGHph0in1+8sknzbngu6iFQbAI+FbYCPLMVcHEVazPvxxb1r3jjjukbt265tgAx4P9Q1qYKIZjw/rA8ec6UAlQKgOVAEXZAeIlgKDKhDQWltOuTac7oMRKe7Qfgj0zihGEKFkzmQ2laBvQd0YC+A1K3fa1hRIpQYffQTSocif4Q7wEsF9ME2uhGr9atWqm9oNq7ngJYB50+3uUuCnlU+IHJnmihsNfskeIKL0nglI2U1bzO5TW+S5kAOyEWcwg+eabb5r+EZSuCaZArQYB2tZEEOiZba1169bmb2bbtBIANOXccsst5jso/VMjQN8OJIhpqdluSvlIgoWgzfmm5oSgT81LUD8Mjhc1IwgPUsL2UjNj16e2gG1lX5jpMlHth6IkA5UARdkBrATYal8y9oEDB5q/gRIdsyhS8gUkgJoAW/qklEgzAYGf6mZK0lQX33rrraVTHu+MBPAbfI8f2r8JqgQ7Zizj9wjqNpDHSwBBkhKzLZFSu0FQJBAT9PwSQMmdZg4bwFhO0Edm4PrrrzdCYZ9YoAqdfXRJALAex5j9p+qfY0nJnsDM9N7MgPjUU0+Z36LWBRkAjjc1MP7aAwItcsD+MEubXwKokuf7OQ+cA76PVLt2bdMZEanhmDGLoIXtqlKlivmXEj7NIbT3x9O7d28jAdQGsN32u6nl4FgiVF999ZVpdkEyWI/Op/YaUZRkohKgKDuAlQBbE0D1sS1ZA6VnglGbNm3MayTACgHQyezggw+Wb775prSnPUGN9vCwJaBRo0ampEvJlUBIgGR733vvPbM8SAI++OCDUglgX1wSQDu73QeWE6Dvuece8xoZIIgiPRa2j8BdXpgim+3luNDWf+ONN5Y2bcQTJAGcJysB1ATYWgMgEF999dWm30AQfA/745cApMhKALU5nCfm3Y+H5gWebqBZxQVNA8wpjzAhBIiHoiQblQBF2QHiJYBAS3AhUNJWTGmbDnAEeyBwUvq1vdEpQVKlTBUxAZIOdrymRGh7zoclAS+99JKpgico0YGREi+PFrZs2dIsj1ICaCKhDRwZoo8Cv7nHHnsk7BPw008/mXZztoUAzG9zTOh0SO0A7fQcd/o8cJw4jv379y/tbOeSAErYBGsevaQGwGLFgG2l3wLLqAWg2h9JcEkA0ObPMSOQ83lK8zQFUDtC6Z6aBp4m4bt4nyYihIbzwfnlMyy7/fbbzb5yDBUl2agEKMoOQCCgWpo2fSAIU+KlPZ6q9quuusq081oIPnQeI9DTTk5AI9gdccQRprc51cTXXnutqbLn0TigrZ3H41zw+4wPYCWA6m6Crh86z915552mNE2nP3q5065uq9Cpkqb/Ao+oAftALYGVAKq6EY3Ro0ebAMW6dkwEOujRFOKXADrKEdCAUi0Bm/3kewmodOyrVauWWR4PpX6aCwjciAdV+dQm2M5zlLwRD44vpWzOAf0GbEn8k08+McfZLwGsY5tiEBFK/pwjmgAI3ARgPsdTAXwnAZ3zQMdNlrE/nDMLYsI4EVwDgIjYZgr6WiBJffv2NcsQFYSH/eG7Of68Zvv69etnaohoMuDYUAuEGGhzgFIZqAQoyg5AMKKU76/mJgBSiqQmwAZlC23irM/jera6n+8gSLA+1duUCPmcDb6sZ4NrIvh91rOfYRts7YMfaiCQAbaPdaiCJsABgZrgyPYAn2cdC+9Tg4C42EBqP2t/3wYu/rX7YWEdOurx++w
[base64-encoded figure data truncated]) **Demo Code**

The following code demonstrates the training of a SetFitABSA model and its evaluation on the **FiQA_SA** financial dataset. Specifically:
* We sample **k=24** sentences from the training set using an arbitrary seed
* We train the SetFitABSA model using only these **k** samples
* We evaluate the trained model on the entire test set, which is exactly the same set that was used to evaluate the BloombergGPT model. The evaluated task is **SB2** - aspects are given and the model needs to predict their corresponding polarity

With just 24 training sentences we get a weighted F1-score that is on par with or better than the 75.07 score achieved by BloombergGPT (try it yourself!)

Install setfit with the **SetFitABSA option**

We have to install SetFit as well as download a spaCy model for the initial aspect span candidate selection. We will download `en_core_web_lg`.<jupyter_code>!pip install -U "setfit[absa]"
!python -m spacy download en_core_web_lg<jupyter_output><empty_output><jupyter_text>Import required packages<jupyter_code>from setfit import AbsaTrainer, TrainingArguments, AbsaModel
from datasets import load_dataset
from sklearn.metrics import f1_score<jupyter_output><empty_output><jupyter_text>Load the **FiQA_SA** dataset<jupyter_code>dataset = load_dataset("ronenlap/SetFitAbsa_FiQA")
train_ds = dataset["train"]
test_ds = dataset["test"]
train_ds
train_ds[0]<jupyter_output><empty_output><jupyter_text>Simulate the few-shot regime by sampling **k** training sentences<jupyter_code>k = 24
seed = 35
experiment_ds = train_ds.shuffle(seed=seed).select(range(k))
experiment_ds<jupyter_output><empty_output><jupyter_text>**Training a SetFitABSA Model** Initialize an ABSA model. We'll initialize an AbsaModel using the strong [paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) base model.<jupyter_code>model = AbsaModel.from_pretrained(
"sentence-transformers/paraphrase-mpnet-base-v2",
)<jupyter_output>model_head.pkl not found on HuggingFace Hub, initialising classification head with random weights. You should TRAIN this model on a downstream task to use it for predictions and inference.
model_head.pkl not found on HuggingFace Hub, initialising classification head with random weights. You should TRAIN this model on a downstream task to use it for predictions and inference.<jupyter_text>Setting the training arguments<jupyter_code>args = TrainingArguments(
num_epochs=1,
batch_size=4,
num_iterations=20,
save_strategy="no",
report_to="none"
)<jupyter_output><empty_output><jupyter_text>Creating a trainer object and training the SetFitABSA model. We need to apply a column mapping because the AbsaTrainer expects `"text"`, `"span"`, `"label"` and `"ordinal"` columns. The text refers to the sentence, whereas the span is an aspect span within the sentence. The label is the corresponding polarity label (e.g. "positive"), while the ordinal is used to distinguish spans that occur multiple times in the sentence. For example, if `"stock"` is the current aspect span and it occurs 3 times in the sentence, an ordinal of `0` indicates that the sample refers to the first occurrence.<jupyter_code>trainer = AbsaTrainer(
model,
args=args,
train_dataset=experiment_ds, # if you want to train over the entire train set change experiment_ds to train_ds
column_mapping={
"sentence": "text",
"aspect": "span",
"polarity": "label",
"ordinal": "ordinal",
},
)
trainer.train()<jupyter_output><empty_output><jupyter_text>Now that training has completed, `model` is a trained ABSA model. **Evaluation** As mentioned above, the evaluated ABSA task is SB2 - aspects are given and the model only needs to predict their corresponding polarity. SetFitABSA supports this by passing the `predict` method a dataset in a format similar to the one required for training, minus the `label` column.<jupyter_code># rename columns according to predict() requirements
test_ds = test_ds.rename_columns({"sentence": "text","aspect": "span"})
output = model.predict(test_ds) # a new column which holds the predicted polarity, "pred_polarity", is added to the dataset
output<jupyter_output><empty_output><jupyter_text>Then, we can compute the weighted F1 score by comparing the gold polarity with the predicted polarity.<jupyter_code>weighted_f1_score = f1_score(output["polarity"], output["pred_polarity"], average="weighted")
print(f"weighted_f1_score: {weighted_f1_score}")<jupyter_output>weighted_f1_score: 0.7819641265152422<jupyter_text>**Inference**Now that we've trained and evaluated the model, let's also try it out with some examples to get an intuitive feel of the model as well.<jupyter_code>sentences = [
"#Tesla: Model X Recall Adds To Reliability Issues $TSLA https://t.co/jVXQ4DoXnP",
"$CIEN seems to have broken out of a major horizontal resistance. Targets $14.35.",
"$AAPL I am big OUT from this. seems its falling towards 530.. :(",
]
model.predict(sentences)<jupyter_output><empty_output> | setfit/notebooks/setfit-absa-fiqa.ipynb/0 | {
"file_path": "setfit/notebooks/setfit-absa-fiqa.ipynb",
"repo_id": "setfit",
"token_count": 78712
} | 382 |
#import argparse
import os
import json
from src.utils.Config import Config
from src.train import train
def call_adapet(updated_args):
'''
parser = argparse.ArgumentParser()
# Arguments for running any datasets
parser.add_argument('-d', "--data_dir", default=configs['generic_data_dir'],
help="Data directory containing train/val/test jsonl files")
parser.add_argument('-p', "--pattern", default=configs['pattern'],
help="Pattern to be used for this dataset")
parser.add_argument('-v', "--dict_verbalizer", type=json.loads, default=configs['dict_verbalizer'],
help="Dictionary mapping label name (in dataset) to the verbalizer to use, e.g. '{\"0\": \"Yes\", \"1\": \"No\"}'")
# Model and training hyperparams
parser.add_argument('-w', '--pretrained_weight', type=str, default=configs['pretrained_weight'],
help='Pretrained model weights from huggingface')
parser.add_argument('-bs', '--batch_size', type=int, default=1, help='batch size during training')
parser.add_argument('--eval_batch_size', type=int, default=configs['batch_size'],
help='batch size during evaluation')
parser.add_argument('--grad_accumulation_factor', type=int, default=16, help='number of gradient accumulation steps')
parser.add_argument('--num_batches', type=int, default=configs['num_batches'],
help='number of batches for experiment; 1 batch = grad_accumulation_factor x batch_size')
parser.add_argument('--eval_every', type=int, default=configs['eval_every'],
help='number of training batches per evaluation')
parser.add_argument('--max_text_length', type=int, default=256, help='maximum total input sequence length after tokenization for ADAPET')
parser.add_argument('--lr', type=float, default=1e-5, help='learning rate for the model')
parser.add_argument('--weight_decay', type=float, default=1e-2, help='weight decay for the optmizer')
parser.add_argument('--grad_clip_norm', type=float, default=1, help='gradient clipping norm')
parser.add_argument('--warmup_ratio', type=float, default=0.06, help='linear warmup over warmup_steps for num_batches')
# ADAPET hyperparameters
parser.add_argument('--pattern_idx', default=1, help="Pattern index among all patterns available; For SuperGLUE, can use numbers >1 depending on dataset. For a new dataset, please set this to 1.")
parser.add_argument('--mask_alpha', type=float, default=0.105, help='masking ratio for the label conditioning loss')
parser.add_argument('--idx_txt_trim', type=int, default=1, help="TXT_ID of the text that can be trimmed (usually the longer text). Eg. if TXT1 needs to be trimmed, set this to 1.")
parser.add_argument('--max_num_lbl_tok', type=int, default=configs['max_num_lbl_tok'], help="The maximum number of tokens per label for the verbalizer. It will raise an error if the tokenizer tokenizes a label into more than 'max_num_lbl_tok' tokens.")
# Replicating SuperGLUE results
parser.add_argument('-c', '--config', type=str, default=None, help='Use this for replicating SuperGLUE results.')
'''
#args = final_parser.parse_args()
args = updated_args
# If even one of these three arguments are provided, we need all three as input
if args.data_dir or args.pattern or args.dict_verbalizer:
assert args.data_dir and args.pattern and args.dict_verbalizer, 'Please enter all of data_dir, pattern, dict_verbalizer!'
if args.config is not None:
use_config = args.config
else:
assert args.data_dir or args.pattern or args.dict_verbalizer, 'Please enter all of data_dir, pattern, dict_verbalizer if not providing config!'
use_config = os.path.join("config", "Generic.json")
update_config = vars(args)
config = Config(use_config, update_config, mkdir=True)
train(config)
print(config.exp_dir)
return config.exp_dir
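
# --- Added usage sketch (not part of the original script) ---
# call_adapet() expects an argparse.Namespace-like object that carries the same
# attributes as the commented-out parser above (data_dir, pattern,
# dict_verbalizer, config, pretrained_weight, batch_size, and the remaining
# hyperparameters). The values below are purely illustrative:
#
#     from types import SimpleNamespace
#     args = SimpleNamespace(
#         data_dir="data/my_task",                  # dir with train/val/test jsonl files
#         pattern="[TEXT1] It was [LBL].",          # hypothetical pattern string
#         dict_verbalizer={"0": "great", "1": "terrible"},
#         config=None,                              # falls back to config/Generic.json
#         # ...plus the other arguments listed in the parser above...
#     )
#     exp_dir = call_adapet(args)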
| setfit/scripts/adapet/ADAPET/cli.py/0 | {
"file_path": "setfit/scripts/adapet/ADAPET/cli.py",
"repo_id": "setfit",
"token_count": 1453
} | 383 |
import os
import json
import random
import itertools
import numpy as np
import torch
from collections import defaultdict
from src.data.tokenize import tokenize_pet_txt, tokenize_pet_mlm_txt
from src.utils.util import device
class BoolQReader(object):
'''
BoolQReader reads BoolQ dataset
'''
def __init__(self, config, tokenizer):
self.config = config
self.tokenizer = tokenizer
self.num_lbl = 2
self.pet_labels = [["yes", "no"], ["true", "false"]]
self.pet_patterns = [["[PARAGRAPH]", " Question : [QUESTION] ? Answer : {}. [SEP]".format(self.tokenizer.mask_token), ""],
["[PARAGRAPH]", " Based on the previous passage, [QUESTION] ? {}. [SEP]".format(self.tokenizer.mask_token), ""],
["Based on the following passage, [QUESTION] ? {}. ".format(self.tokenizer.mask_token), " [PARAGRAPH] [SEP]", ""]]
self.pet_pvps = list(itertools.product(self.pet_patterns, self.pet_labels))
self._num_pets = len(self.pet_pvps)
self._pet_names = ["PET{}".format(i+1) for i in range(self._num_pets)]
self.list_true_lbl = []
self.dict_lbl_2_idx = {True: 0, False: 1}
def _get_file(self, split):
'''
Get filename of split
:param split:
:return:
'''
if split.lower() == "train":
file = os.path.join("data", "fewglue", "BoolQ", "train.jsonl")
elif split.lower() == "dev":
file = os.path.join("data", "superglue", "BoolQ", "val.jsonl")
elif split.lower() == "test":
file = os.path.join("data", "superglue", "BoolQ", "test.jsonl")
return file
def get_num_lbl_tok(self):
return 1
def read_dataset(self, split=None, is_eval=False):
'''
Read the dataset
:param split: partition of the dataset
:param is_eval:
:return:
'''
file = self._get_file(split)
data = []
with open(file, 'r') as f_in:
for i, line in enumerate(f_in.readlines()):
json_string = json.loads(line)
dict_input = {}
dict_input["question"] = json_string["question"]
dict_input["passage"] = json_string["passage"]
dict_input["idx"] = json_string["idx"]
dict_output = {}
if "label" in json_string:
dict_output["lbl"] = self.dict_lbl_2_idx[json_string["label"]]
else:
dict_output["lbl"] = -1
dict_input_output = {"input": dict_input, "output": dict_output}
data.append(dict_input_output)
return data
@property
def pets(self):
return self._pet_names
def prepare_pet_batch(self, batch, mode="PET1"):
'''
        Prepare a PET batch for training: fill the selected pattern with each
        passage/question pair and record the mask-token positions.
        :param batch: batch of input/output dicts produced by read_dataset
        :return: input ids, mask indices, and the verbalized label pair
'''
list_question = batch["input"]["question"]
list_passage = batch["input"]["passage"]
list_input_ids = []
bs = len(batch["input"]["question"])
list_mask_idx = np.ones((bs, self.get_num_lbl_tok())) * self.config.max_text_length
pattern, label = self.pet_pvps[self._pet_names.index(mode)]
for b_idx, (p, q) in enumerate(zip(list_passage, list_question)):
mask_txt_split_tuple = []
txt_trim = -1
for idx, txt_split in enumerate(pattern):
mask_txt_split_inp = txt_split.replace("[PARAGRAPH]", p).replace("[QUESTION]", q)
mask_txt_split_tuple.append(mask_txt_split_inp)
# Trim the paragraph
if "[PARAGRAPH]" in txt_split:
txt_trim = idx
input_ids, mask_idx = tokenize_pet_txt(self.tokenizer, self.config, mask_txt_split_tuple[0], mask_txt_split_tuple[1], mask_txt_split_tuple[2], mask_txt_split_tuple[0], mask_txt_split_tuple[1], mask_txt_split_tuple[2], txt_trim)
list_input_ids.append(input_ids)
list_mask_idx[b_idx,:self.get_num_lbl_tok()] = range(mask_idx, mask_idx+self.get_num_lbl_tok())
return torch.tensor(list_input_ids).to(device), torch.tensor(list_mask_idx).to(device), label
def prepare_pet_mlm_batch(self, batch, mode="PET1"):
'''
        Prepare a masked-LM training batch: a label is sampled at random for each
        example, verbalized into the pattern, and the target marks whether it
        matches the gold label.
        :param batch: batch of input/output dicts produced by read_dataset
        :return: original input ids, masked input ids, sampled labels, and targets
'''
list_question = batch["input"]["question"]
list_passage = batch["input"]["passage"]
bs = len(batch["input"]["question"])
prep_lbl = np.random.randint(self.num_lbl, size=bs)
tgt = torch.from_numpy(prep_lbl).long() == batch["output"]["lbl"]
pattern, label = self.pet_pvps[self._pet_names.index(mode)]
list_orig_input_ids = []
list_masked_input_ids = []
for b_idx, (p, q, lbl) in enumerate(zip(list_passage, list_question, prep_lbl)):
txt_split_tuple = []
txt_trim = -1
for idx, txt_split in enumerate(pattern):
txt_split_inp = txt_split.replace("[PARAGRAPH]", p).replace("[QUESTION]", q).replace("[MASK]", label[lbl])
txt_split_tuple.append(txt_split_inp)
# Trim the paragraph
if "[PARAGRAPH]" in txt_split:
txt_trim = idx
orig_input_ids, masked_input_ids, mask_idx = tokenize_pet_mlm_txt(self.tokenizer, self.config, txt_split_tuple[0], txt_split_tuple[1], txt_split_tuple[2], txt_trim)
list_orig_input_ids.append(orig_input_ids)
list_masked_input_ids.append(masked_input_ids)
return torch.tensor(list_orig_input_ids).to(device), torch.tensor(list_masked_input_ids).to(device), prep_lbl, tgt.to(device)
def prepare_eval_pet_batch(self, batch, mode="PET1"):
return self.prepare_pet_batch(batch, mode)
def store_test_lbl(self, list_idx, pred_lbl, true_lbl, logits):
self.list_true_lbl.append(pred_lbl)
def flush_file(self, write_file):
self.list_true_lbl = torch.cat(self.list_true_lbl, dim=0).cpu().int().numpy().tolist()
read_file = self._get_file("test")
with open(read_file, 'r') as f_in:
for ctr, line in enumerate(f_in.readlines()):
answer_dict = {}
answer_dict["idx"] = ctr
pred_lbl = self.list_true_lbl[ctr]
if pred_lbl == 0:
answer = "true"
else:
answer = "false"
answer_dict["label"] = answer
write_file.write(json.dumps(answer_dict) + "\n")
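
# --- Added usage sketch (not part of the original module) ---
# The reader is normally obtained through the Batcher rather than instantiated
# directly (see src/run_pretrained.py). A rough, illustrative flow:
#
#     tokenizer = AutoTokenizer.from_pretrained(config.pretrained_weight)
#     batcher = Batcher(config, tokenizer, config.dataset)  # dataset name for BoolQ
#     reader = batcher.get_dataset_reader()
#     train_data = reader.read_dataset("train")
#     input_ids, mask_idx, label = reader.prepare_pet_batch(batch, mode="PET1")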
| setfit/scripts/adapet/ADAPET/src/data/BoolQReader.py/0 | {
"file_path": "setfit/scripts/adapet/ADAPET/src/data/BoolQReader.py",
"repo_id": "setfit",
"token_count": 3304
} | 384 |
import argparse
import os
import torch
import numpy as np
from transformers import *
from src.data.Batcher import Batcher
from src.utils.Config import Config
from src.utils.util import device, ParseKwargs
from src.adapet import adapet
from src.eval.eval_model import dev_eval
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', "--model_dir", required=True)
parser.add_argument('-c', "--config_file", required=True)
parser.add_argument('-k', '--kwargs', nargs='*', action=ParseKwargs, default={})
args = parser.parse_args()
config = Config(args.config_file, args.kwargs, mkdir=True)
tokenizer = AutoTokenizer.from_pretrained(config.pretrained_weight)
batcher = Batcher(config, tokenizer, config.dataset)
dataset_reader = batcher.get_dataset_reader()
model = adapet(config, tokenizer, dataset_reader).to(device)
model.load_state_dict(torch.load(os.path.join(args.model_dir, "best_model.pt")))
dev_eval(config, model, batcher, 0)
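
# --- Added note (not part of the original script) ---
# Example invocation; the paths are placeholders and the -k overrides assume the
# key=value convention handled by ParseKwargs:
#
#   python -m src.run_pretrained \
#       -m exp_out/<experiment_dir> \
#       -c exp_out/<experiment_dir>/config.json \
#       -k eval_batch_size=8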
| setfit/scripts/adapet/ADAPET/src/run_pretrained.py/0 | {
"file_path": "setfit/scripts/adapet/ADAPET/src/run_pretrained.py",
"repo_id": "setfit",
"token_count": 363
} | 385 |
import argparse
import json
import os
import pathlib
import sys
import warnings
from collections import Counter
from shutil import copyfile
from warnings import simplefilter
from datasets import load_dataset
from sentence_transformers import models
from typing_extensions import LiteralString
from setfit import SetFitModel, SetFitTrainer
from setfit.data import get_templated_dataset
from setfit.utils import DEV_DATASET_TO_METRIC, LOSS_NAME_TO_CLASS, TEST_DATASET_TO_METRIC
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))
# ignore all future warnings
simplefilter(action="ignore", category=FutureWarning)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--model", default="paraphrase-mpnet-base-v2")
parser.add_argument("--eval_dataset", default="SetFit/emotion")
parser.add_argument("--candidate_labels", nargs="+")
parser.add_argument("--reference_dataset", default=None)
parser.add_argument("--label_names_column", default="label_text")
parser.add_argument("--aug_sample_size", type=int, default=8)
parser.add_argument("--num_iterations", type=int, default=20)
parser.add_argument("--num_epochs", type=int, default=1)
parser.add_argument("--batch_size", type=int, default=16)
parser.add_argument("--max_seq_length", type=int, default=256)
parser.add_argument(
"--classifier",
default="logistic_regression",
choices=[
"logistic_regression",
"svc-rbf",
"svc-rbf-norm",
"knn",
"pytorch",
"pytorch_complex",
],
)
parser.add_argument("--loss", default="CosineSimilarityLoss")
parser.add_argument("--exp_name", default="")
parser.add_argument("--add_normalization_layer", default=False, action="store_true")
parser.add_argument("--optimizer_name", default="AdamW")
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--override_results", default=False, action="store_true")
parser.add_argument("--keep_body_frozen", default=False, action="store_true")
args = parser.parse_args()
return args
def create_results_path(dataset: str, split_name: str, output_path: str) -> LiteralString:
results_path = os.path.join(output_path, dataset, split_name, "results.json")
print(f"\n\n======== {os.path.dirname(results_path)} =======")
os.makedirs(os.path.dirname(results_path), exist_ok=True)
return results_path
def main():
args = parse_args()
parent_directory = pathlib.Path(__file__).parent.absolute()
output_path = (
parent_directory
/ "results"
/ f"{args.model.replace('/', '-')}-{args.loss}-{args.classifier}-iterations_{args.num_iterations}-batch_{args.batch_size}-{args.exp_name}".rstrip(
"-"
)
)
os.makedirs(output_path, exist_ok=True)
# Save a copy of this training script and the run command in results directory
train_script_path = os.path.join(output_path, "train_script.py")
copyfile(__file__, train_script_path)
with open(train_script_path, "a") as f_out:
f_out.write("\n\n# Script was called via:\n#python " + " ".join(sys.argv))
# Configure loss function
loss_class = LOSS_NAME_TO_CLASS[args.loss]
metric = DEV_DATASET_TO_METRIC.get(args.eval_dataset, TEST_DATASET_TO_METRIC.get(args.eval_dataset, "accuracy"))
if args.reference_dataset is None and args.candidate_labels is None:
args.reference_dataset = args.eval_dataset
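    # Build a synthetic (zero-shot) training set by filling prompt templates with the label
    # names; `--aug_sample_size` controls how many templated examples are generated per label.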
train_data = get_templated_dataset(
reference_dataset=args.reference_dataset,
candidate_labels=args.candidate_labels,
sample_size=args.aug_sample_size,
label_names_column=args.label_names_column,
)
test_data = load_dataset(args.eval_dataset, split="test")
print(f"Evaluating {args.eval_dataset} using {metric!r}.")
# Report on an imbalanced test set
counter = Counter(test_data["label"])
label_samples = sorted(counter.items(), key=lambda label_samples: label_samples[1])
smallest_n_samples = label_samples[0][1]
largest_n_samples = label_samples[-1][1]
# If the largest class is more than 50% larger than the smallest
if largest_n_samples > smallest_n_samples * 1.5:
warnings.warn(
"The test set has a class imbalance "
f"({', '.join(f'label {label} w. {n_samples} samples' for label, n_samples in label_samples)})."
)
results_path = create_results_path(args.eval_dataset, "zeroshot", output_path)
if os.path.exists(results_path) and not args.override_results:
print(f"Skipping finished experiment: {results_path}")
exit()
# Load model
if args.classifier == "pytorch":
model = SetFitModel.from_pretrained(
args.model,
use_differentiable_head=True,
head_params={"out_features": len(set(train_data["label"]))},
)
else:
model = SetFitModel.from_pretrained(args.model)
model.model_body.max_seq_length = args.max_seq_length
if args.add_normalization_layer:
model.model_body._modules["2"] = models.Normalize()
# Train on current split
trainer = SetFitTrainer(
model=model,
train_dataset=train_data,
eval_dataset=test_data,
metric=metric,
loss_class=loss_class,
batch_size=args.batch_size,
num_epochs=args.num_epochs,
num_iterations=args.num_iterations,
)
if args.classifier == "pytorch":
trainer.freeze()
trainer.train()
trainer.unfreeze(keep_body_frozen=args.keep_body_frozen)
trainer.train(
num_epochs=25,
body_learning_rate=1e-5,
learning_rate=args.lr, # recommend: 1e-2
l2_weight=0.0,
batch_size=args.batch_size,
)
else:
trainer.train()
# Evaluate the model on the test data
metrics = trainer.evaluate()
print(f"Metrics: {metrics}")
with open(results_path, "w") as f_out:
json.dump(
{"score": metrics[metric] * 100, "measure": metric},
f_out,
sort_keys=True,
)
if __name__ == "__main__":
main()
| setfit/scripts/setfit/run_zeroshot.py/0 | {
"file_path": "setfit/scripts/setfit/run_zeroshot.py",
"repo_id": "setfit",
"token_count": 2605
} | 386 |
__version__ = "1.1.0.dev0"
import importlib
import os
import warnings
from .data import get_templated_dataset, sample_dataset
from .model_card import SetFitModelCardData
from .modeling import SetFitHead, SetFitModel
from .span import AbsaModel, AbsaTrainer, AspectExtractor, AspectModel, PolarityModel
from .trainer import SetFitTrainer, Trainer
from .trainer_distillation import DistillationSetFitTrainer, DistillationTrainer
from .training_args import TrainingArguments
# Ensure that DeprecationWarnings are shown by default, as recommended by
# https://docs.python.org/3/library/warnings.html#overriding-the-default-filter
warnings.filterwarnings("default", category=DeprecationWarning)
# If codecarbon is installed and the log level is not defined,
# automatically overwrite the default to "error"
if importlib.util.find_spec("codecarbon") and "CODECARBON_LOG_LEVEL" not in os.environ:
os.environ["CODECARBON_LOG_LEVEL"] = "error"
| setfit/src/setfit/__init__.py/0 | {
"file_path": "setfit/src/setfit/__init__.py",
"repo_id": "setfit",
"token_count": 289
} | 387 |
from collections import defaultdict
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Tuple, Union
from datasets import Dataset
from transformers.trainer_callback import TrainerCallback
from setfit.span.modeling import AbsaModel, AspectModel, PolarityModel
from setfit.training_args import TrainingArguments
from .. import logging
from ..trainer import ColumnMappingMixin, Trainer
if TYPE_CHECKING:
import optuna
logger = logging.get_logger(__name__)
class AbsaTrainer(ColumnMappingMixin):
"""Trainer to train a SetFit ABSA model.
Args:
model (`AbsaModel`):
The AbsaModel model to train.
args (`TrainingArguments`, *optional*):
The training arguments to use. If `polarity_args` is not defined, then `args` is used for both
the aspect and the polarity model.
polarity_args (`TrainingArguments`, *optional*):
The training arguments to use for the polarity model. If not defined, `args` is used for both
the aspect and the polarity model.
train_dataset (`Dataset`):
The training dataset. The dataset must have "text", "span", "label" and "ordinal" columns.
eval_dataset (`Dataset`, *optional*):
The evaluation dataset. The dataset must have "text", "span", "label" and "ordinal" columns.
metric (`str` or `Callable`, *optional*, defaults to `"accuracy"`):
The metric to use for evaluation. If a string is provided, we treat it as the metric
name and load it with default settings.
If a callable is provided, it must take two arguments (`y_pred`, `y_test`).
metric_kwargs (`Dict[str, Any]`, *optional*):
Keyword arguments passed to the evaluation function if `metric` is an evaluation string like "f1".
For example useful for providing an averaging strategy for computing f1 in a multi-label setting.
callbacks (`List[`[`~transformers.TrainerCallback`]`]`, *optional*):
A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](https://huggingface.co/docs/transformers/main/en/main_classes/callback).
If you want to remove one of the default callbacks used, use the [`Trainer.remove_callback`] method.
column_mapping (`Dict[str, str]`, *optional*):
A mapping from the column names in the dataset to the column names expected by the model.
The expected format is a dictionary with the following format:
            `{"text_column_name": "text", "span_column_name": "span", "label_column_name": "label", "ordinal_column_name": "ordinal"}`.
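    Example:
        A minimal usage sketch; the model name and the tiny dataset below are illustrative
        placeholders, not values prescribed by this class::
            from datasets import Dataset
            from setfit import AbsaModel, AbsaTrainer
            # Load an ABSA model whose aspect and polarity bodies share the same Sentence Transformer.
            model = AbsaModel.from_pretrained("sentence-transformers/all-MiniLM-L6-v2")
            # The training set must provide "text", "span", "label" and "ordinal" columns.
            train_dataset = Dataset.from_dict(
                {
                    "text": ["The food was great but the service was slow."],
                    "span": ["service"],
                    "label": ["negative"],
                    "ordinal": [0],
                }
            )
            trainer = AbsaTrainer(model, train_dataset=train_dataset)
            trainer.train()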
"""
_REQUIRED_COLUMNS = {"text", "span", "label", "ordinal"}
def __init__(
self,
model: AbsaModel,
args: Optional[TrainingArguments] = None,
polarity_args: Optional[TrainingArguments] = None,
train_dataset: Optional["Dataset"] = None,
eval_dataset: Optional["Dataset"] = None,
metric: Union[str, Callable[["Dataset", "Dataset"], Dict[str, float]]] = "accuracy",
metric_kwargs: Optional[Dict[str, Any]] = None,
callbacks: Optional[List[TrainerCallback]] = None,
column_mapping: Optional[Dict[str, str]] = None,
) -> None:
self.model = model
self.aspect_extractor = model.aspect_extractor
if train_dataset is not None and column_mapping:
train_dataset = self._apply_column_mapping(train_dataset, column_mapping)
aspect_train_dataset, polarity_train_dataset = self.preprocess_dataset(
model.aspect_model, model.polarity_model, train_dataset
)
if eval_dataset is not None and column_mapping:
eval_dataset = self._apply_column_mapping(eval_dataset, column_mapping)
aspect_eval_dataset, polarity_eval_dataset = self.preprocess_dataset(
model.aspect_model, model.polarity_model, eval_dataset
)
self.aspect_trainer = Trainer(
model.aspect_model,
args=args,
train_dataset=aspect_train_dataset,
eval_dataset=aspect_eval_dataset,
metric=metric,
metric_kwargs=metric_kwargs,
callbacks=callbacks,
)
self.aspect_trainer._set_logs_mapper(
{
"eval_embedding_loss": "eval_aspect_embedding_loss",
"embedding_loss": "aspect_embedding_loss",
}
)
self.polarity_trainer = Trainer(
model.polarity_model,
args=polarity_args or args,
train_dataset=polarity_train_dataset,
eval_dataset=polarity_eval_dataset,
metric=metric,
metric_kwargs=metric_kwargs,
callbacks=callbacks,
)
self.polarity_trainer._set_logs_mapper(
{
"eval_embedding_loss": "eval_polarity_embedding_loss",
"embedding_loss": "polarity_embedding_loss",
}
)
def preprocess_dataset(
self, aspect_model: AspectModel, polarity_model: PolarityModel, dataset: Dataset
    ) -> Tuple[Dataset, Dataset]:
if dataset is None:
return dataset, dataset
# Group by "text"
grouped_data = defaultdict(list)
for sample in dataset:
text = sample.pop("text")
grouped_data[text].append(sample)
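        # Resolve a (target, ordinal) annotation to character offsets in `text`;
        # the ordinal selects which occurrence of the target string is meant.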
def index_ordinal(text: str, target: str, ordinal: int) -> Tuple[int, int]:
find_from = 0
for _ in range(ordinal + 1):
start_idx = text.index(target, find_from)
find_from = start_idx + 1
return start_idx, start_idx + len(target)
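        # True if `aspect` shares at least one token index with any span in `aspects`.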
def overlaps(aspect: slice, aspects: List[slice]) -> bool:
for test_aspect in aspects:
overlapping_indices = set(range(aspect.start, aspect.stop + 1)) & set(
range(test_aspect.start, test_aspect.stop + 1)
)
if overlapping_indices:
return True
return False
docs, aspects_list = self.aspect_extractor(grouped_data.keys())
aspect_aspect_list = []
aspect_labels = []
polarity_aspect_list = []
polarity_labels = []
for doc, aspects, text in zip(docs, aspects_list, grouped_data):
# Collect all of the gold aspects
gold_aspects = []
gold_polarity_labels = []
for annotation in grouped_data[text]:
try:
start, end = index_ordinal(text, annotation["span"], annotation["ordinal"])
except ValueError:
logger.info(
f"The ordinal of {annotation['ordinal']} for span {annotation['span']!r} in {text!r} is too high. "
"Skipping this sample."
)
continue
gold_aspect_span = doc.char_span(start, end)
if gold_aspect_span is None:
continue
gold_aspects.append(slice(gold_aspect_span.start, gold_aspect_span.end))
gold_polarity_labels.append(annotation["label"])
# The Aspect model uses all gold aspects as "True", and all non-overlapping predicted
# aspects as "False"
aspect_labels.extend([True] * len(gold_aspects))
aspect_aspect_list.append(gold_aspects[:])
for aspect in aspects:
if not overlaps(aspect, gold_aspects):
aspect_labels.append(False)
aspect_aspect_list[-1].append(aspect)
# The Polarity model uses only the gold aspects and labels
polarity_labels.extend(gold_polarity_labels)
polarity_aspect_list.append(gold_aspects)
aspect_texts = list(aspect_model.prepend_aspects(docs, aspect_aspect_list))
polarity_texts = list(polarity_model.prepend_aspects(docs, polarity_aspect_list))
return Dataset.from_dict({"text": aspect_texts, "label": aspect_labels}), Dataset.from_dict(
{"text": polarity_texts, "label": polarity_labels}
)
def train(
self,
args: Optional[TrainingArguments] = None,
polarity_args: Optional[TrainingArguments] = None,
trial: Optional[Union["optuna.Trial", Dict[str, Any]]] = None,
**kwargs,
) -> None:
"""
Main training entry point.
Args:
args (`TrainingArguments`, *optional*):
Temporarily change the aspect training arguments for this training call.
polarity_args (`TrainingArguments`, *optional*):
Temporarily change the polarity training arguments for this training call.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
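        Example:
            A sketch showing separate arguments per sub-model (the epoch counts are
            illustrative, not recommendations)::
                trainer.train(
                    args=TrainingArguments(num_epochs=1),
                    polarity_args=TrainingArguments(num_epochs=2),
                )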
"""
self.train_aspect(args=args, trial=trial, **kwargs)
self.train_polarity(args=polarity_args, trial=trial, **kwargs)
def train_aspect(
self,
args: Optional[TrainingArguments] = None,
trial: Optional[Union["optuna.Trial", Dict[str, Any]]] = None,
**kwargs,
) -> None:
"""
Train the aspect model only.
Args:
args (`TrainingArguments`, *optional*):
Temporarily change the aspect training arguments for this training call.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
"""
self.aspect_trainer.train(args=args, trial=trial, **kwargs)
def train_polarity(
self,
args: Optional[TrainingArguments] = None,
trial: Optional[Union["optuna.Trial", Dict[str, Any]]] = None,
**kwargs,
) -> None:
"""
Train the polarity model only.
Args:
args (`TrainingArguments`, *optional*):
                Temporarily change the polarity training arguments for this training call.
trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
The trial run or the hyperparameter dictionary for hyperparameter search.
"""
self.polarity_trainer.train(args=args, trial=trial, **kwargs)
def add_callback(self, callback: Union[type, TrainerCallback]) -> None:
"""
Add a callback to the current list of [`~transformers.TrainerCallback`].
Args:
callback (`type` or [`~transformers.TrainerCallback`]):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will instantiate a member of that class.
"""
self.aspect_trainer.add_callback(callback)
self.polarity_trainer.add_callback(callback)
def pop_callback(self, callback: Union[type, TrainerCallback]) -> Tuple[TrainerCallback, TrainerCallback]:
"""
Remove a callback from the current list of [`~transformers.TrainerCallback`] and returns it.
If the callback is not found, returns `None` (and no error is raised).
Args:
callback (`type` or [`~transformers.TrainerCallback`]):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will pop the first member of that class found in the list of callbacks.
Returns:
`Tuple[`[`~transformers.TrainerCallback`], [`~transformers.TrainerCallback`]`]`: The callbacks removed from the
aspect and polarity trainers, if found.
"""
return self.aspect_trainer.pop_callback(callback), self.polarity_trainer.pop_callback(callback)
def remove_callback(self, callback: Union[type, TrainerCallback]) -> None:
"""
Remove a callback from the current list of [`~transformers.TrainerCallback`].
Args:
callback (`type` or [`~transformers.TrainerCallback`]):
A [`~transformers.TrainerCallback`] class or an instance of a [`~transformers.TrainerCallback`]. In the
first case, will remove the first member of that class found in the list of callbacks.
"""
self.aspect_trainer.remove_callback(callback)
self.polarity_trainer.remove_callback(callback)
def push_to_hub(self, repo_id: str, polarity_repo_id: Optional[str] = None, **kwargs) -> None:
"""Upload model checkpoint to the Hub using `huggingface_hub`.
See the full list of parameters for your `huggingface_hub` version in the\
[huggingface_hub documentation](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.ModelHubMixin.push_to_hub).
Args:
repo_id (`str`):
The full repository ID to push to, e.g. `"tomaarsen/setfit-aspect"`.
            polarity_repo_id (`str`, *optional*):
                The full repository ID to push the polarity model to, e.g. `"tomaarsen/setfit-sst2"`.
config (`dict`, *optional*):
Configuration object to be saved alongside the model weights.
commit_message (`str`, *optional*):
Message to commit while pushing.
private (`bool`, *optional*, defaults to `False`):
Whether the repository created should be private.
api_endpoint (`str`, *optional*):
The API endpoint to use when pushing the model to the hub.
token (`str`, *optional*):
The token to use as HTTP bearer authorization for remote files.
If not set, will use the token set when logging in with
`transformers-cli login` (stored in `~/.huggingface`).
branch (`str`, *optional*):
The git branch on which to push the model. This defaults to
the default branch as specified in your repository, which
defaults to `"main"`.
create_pr (`boolean`, *optional*):
Whether or not to create a Pull Request from `branch` with that commit.
Defaults to `False`.
allow_patterns (`List[str]` or `str`, *optional*):
If provided, only files matching at least one pattern are pushed.
ignore_patterns (`List[str]` or `str`, *optional*):
If provided, files matching any of the patterns are not pushed.
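        Example:
            A sketch; both repository IDs are placeholders::
                trainer.push_to_hub(
                    "user/setfit-absa-aspect",
                    polarity_repo_id="user/setfit-absa-polarity",
                )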
"""
return self.model.push_to_hub(repo_id=repo_id, polarity_repo_id=polarity_repo_id, **kwargs)
def evaluate(self, dataset: Optional[Dataset] = None) -> Dict[str, Dict[str, float]]:
"""
Computes the metrics for a given classifier.
Args:
dataset (`Dataset`, *optional*):
The dataset to compute the metrics on. If not provided, will use the evaluation dataset passed via
the `eval_dataset` argument at `Trainer` initialization.
Returns:
`Dict[str, Dict[str, float]]`: The evaluation metrics.
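        Example:
            The result is nested per sub-model; the metric values below are illustrative::
                {"aspect": {"accuracy": 0.95}, "polarity": {"accuracy": 0.78}}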
"""
aspect_eval_dataset = polarity_eval_dataset = None
if dataset:
aspect_eval_dataset, polarity_eval_dataset = self.preprocess_dataset(
self.model.aspect_model, self.model.polarity_model, dataset
)
return {
"aspect": self.aspect_trainer.evaluate(aspect_eval_dataset),
"polarity": self.polarity_trainer.evaluate(polarity_eval_dataset),
}
| setfit/src/setfit/span/trainer.py/0 | {
"file_path": "setfit/src/setfit/span/trainer.py",
"repo_id": "setfit",
"token_count": 6872
} | 388 |
import string
import numpy as np
import pandas as pd
import pytest
from datasets import Dataset, load_dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from setfit.data import (
SAMPLE_SIZES,
SEEDS,
SetFitDataset,
create_fewshot_splits,
create_fewshot_splits_multilabel,
create_samples,
get_templated_dataset,
sample_dataset,
)
@pytest.fixture
def empty_dataset():
return Dataset.from_dict({})
@pytest.fixture
def dataset():
return Dataset.from_dict(
{
"text": ["label-0 text", "label-1 text"],
"label": [[1, 0], [0, 1]],
}
)
@pytest.fixture
def unbalanced_dataset():
return Dataset.from_dict({"text": string.ascii_letters, "label": [0] + 51 * [1]})
def test_add_to_empty_dataset_defaults(empty_dataset):
augmented_dataset = get_templated_dataset(empty_dataset, candidate_labels=["label-0", "label-1"], multi_label=True)
assert augmented_dataset[:] == {
"text": [
"This sentence is label-0",
"This sentence is label-0",
"This sentence is label-1",
"This sentence is label-1",
],
"label": [[1, 0], [1, 0], [0, 1], [0, 1]],
}
def test_add_to_dataset_defaults(dataset):
augmented_dataset = get_templated_dataset(dataset, candidate_labels=["label-0", "label-1"], multi_label=True)
assert augmented_dataset[:] == {
"text": [
"label-0 text",
"label-1 text",
"This sentence is label-0",
"This sentence is label-0",
"This sentence is label-1",
"This sentence is label-1",
],
"label": [[1, 0], [0, 1], [1, 0], [1, 0], [0, 1], [0, 1]],
}
@pytest.mark.parametrize(
"text_column, label_column",
[
("missing-text", "label"),
("text", "missing-label"),
("missing-text", "missing-label"),
],
)
def test_missing_columns(dataset, text_column, label_column):
with pytest.raises(ValueError):
get_templated_dataset(
dataset,
candidate_labels=["label-0", "label-1"],
text_column=text_column,
label_column=label_column,
)
@pytest.mark.parametrize("sample_size", [8, 16, 32])
def test_subset_is_larger_than_sample_size(sample_size):
data = {"label": [0] * 50 + [1] * 50}
df = pd.DataFrame(data)
sample_df = create_samples(df, sample_size=sample_size, seed=0)
assert len(sample_df) == (sample_size * 2)
@pytest.mark.parametrize("sample_size", [8, 16, 32])
def test_subset_is_smaller_than_sample_size(sample_size):
data = {"label": [0] * 3 + [1] * 3}
df = pd.DataFrame(data)
sample_df = create_samples(df, sample_size=sample_size, seed=0)
assert len(sample_df) == len(df)
def test_expected_number_of_splits():
dataset = Dataset.from_pandas(pd.DataFrame({"label": [0] * 50 + [1] * 50}))
num_labels = 2
splits_ds = create_fewshot_splits(dataset, SAMPLE_SIZES)
assert len(splits_ds) == len(SAMPLE_SIZES) * len(SEEDS)
split: Dataset
for idx, split in enumerate(splits_ds.values()):
sample_size = SAMPLE_SIZES[idx // len(SEEDS)]
        # The number of rows is limited to 100 due to the size of the original dataset
assert len(split) == min(sample_size * num_labels, len(dataset))
def test_create_fewshot_splits_with_augmentation():
dataset_name = "sst5"
dataset = load_dataset(f"SetFit/{dataset_name}", split="train")
num_labels = len(set(dataset["label"]))
splits_ds = create_fewshot_splits(
dataset, SAMPLE_SIZES, add_data_augmentation=True, dataset_name=f"SetFit/{dataset_name}"
)
assert len(splits_ds) == len(SAMPLE_SIZES) * len(SEEDS)
split: Dataset
for idx, split in enumerate(splits_ds.values()):
sample_size = SAMPLE_SIZES[idx // len(SEEDS)]
# Each split should have sample_size * num_labels * 2 rows:
# for each label we sample `sample_size`, and then we generate
# another `sample_size` samples through augmentation.
assert len(split) == sample_size * num_labels * 2
def test_create_fewshot_splits_multilabel():
num_samples = 50
dataset = Dataset.from_dict(
{
"text": string.ascii_letters[:50],
"label_one": np.random.randint(2, size=(num_samples,)),
"label_two": np.random.randint(2, size=(num_samples,)),
"label_three": np.random.randint(2, size=(num_samples,)),
}
)
splits_ds = create_fewshot_splits_multilabel(dataset, SAMPLE_SIZES)
assert len(splits_ds) == len(SAMPLE_SIZES) * len(SEEDS)
# We can't safely test the number of rows of each of the splits
# as duplicate samples are removed.
def test_sample_dataset_returns_expected_samples():
num_samples = 2
dataset = Dataset.from_dict({"text": ["hello"] * 50, "label": [0] * 25 + [1] * 25})
samples = sample_dataset(dataset=dataset, num_samples=num_samples)
for label_id in range(num_samples):
assert len(samples.filter(lambda x: x["label"] == label_id)) == num_samples
def test_sample_dataset_with_label_column():
num_samples = 2
label_column = "my_labels"
dataset = Dataset.from_dict({"text": ["hello"] * 50, label_column: [0] * 25 + [1] * 25})
samples = sample_dataset(dataset=dataset, label_column=label_column, num_samples=num_samples)
for label_id in range(num_samples):
assert len(samples.filter(lambda x: x[label_column] == label_id)) == num_samples
def test_sample_dataset_with_unbalanced_ds(unbalanced_dataset):
num_samples = 8
ds = sample_dataset(unbalanced_dataset, num_samples=num_samples)
# The dataset ought to have just `num_samples + 1` rows, as `unbalanced_dataset`
# has one label with more than `num_samples` entries and another label with just 1 row.
# We sample `num_samples` from the former, and 1 from the latter.
assert ds.num_rows == num_samples + 1
@pytest.mark.parametrize(
"dataset",
[
"SetFit/emotion",
"SetFit/ag_news",
"SetFit/amazon_counterfactual_en",
"SetFit/SentEval-CR",
"SetFit/sst5",
"SetFit/enron_spam",
"SetFit/tweet_eval_stance_abortion",
"SetFit/ade_corpus_v2_classification",
],
)
def test_get_augmented_samples(dataset: str):
dataset = get_templated_dataset(reference_dataset=dataset)
assert set(dataset.column_names) == {"text", "label"}
assert len(dataset["text"])
assert len(dataset["label"])
def test_get_augmented_samples_negative():
with pytest.raises(ValueError):
get_templated_dataset(reference_dataset=None, candidate_labels=None)
@pytest.mark.parametrize(
"tokenizer_name",
["sentence-transformers/paraphrase-albert-small-v2", "sentence-transformers/distiluse-base-multilingual-cased-v1"],
)
def test_correct_model_inputs(tokenizer_name):
# Arbitrary testing data
x = list(string.ascii_lowercase)
y = list(range(len(x)))
# Relatively Standard DataLoader setup using a SetFitDataset
# for training a differentiable classification head
tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
dataset = SetFitDataset(x, y, tokenizer)
dataloader = DataLoader(
dataset,
batch_size=2,
collate_fn=dataset.collate_fn,
shuffle=True,
pin_memory=True,
)
# Verify that the x_batch contains exactly those keys that the model requires
x_batch, _ = next(iter(dataloader))
assert set(x_batch.keys()) == set(tokenizer.model_input_names)
def test_preserve_features() -> None:
dataset = load_dataset("SetFit/sst5", split="train[:100]")
label_column = "label_text"
dataset = dataset.class_encode_column(label_column)
train_dataset = sample_dataset(dataset, label_column=label_column, num_samples=8)
assert train_dataset.features[label_column] == dataset.features[label_column]
| setfit/tests/test_data.py/0 | {
"file_path": "setfit/tests/test_data.py",
"repo_id": "setfit",
"token_count": 3442
} | 389 |