Dataset columns: `text` (string, length 29 to 317k), `id` (string, length 22 to 166), `metadata` (dict), `__index_level_0__` (int64, 0 to 231).
FROM python:3.10-slim
ENV PYTHONDONTWRITEBYTECODE=1
USER root
RUN apt-get update && apt-get install -y --no-install-recommends libsndfile1-dev espeak-ng time git g++ cmake pkg-config openssh-client
ENV UV_PYTHON=/usr/local/bin/python
RUN pip --no-cache-dir install uv && uv venv && uv pip install --no-cache-dir -U pip setuptools
RUN pip install --no-cache-dir 'torch' 'torchvision' 'torchaudio' --index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-deps timm accelerate --extra-index-url https://download.pytorch.org/whl/cpu
RUN uv pip install --no-cache-dir librosa "transformers[sklearn,sentencepiece,vision,testing]" seqeval albumentations jiwer
RUN pip uninstall -y transformers
RUN apt-get clean && rm -rf /var/lib/apt/lists/*
transformers/docker/examples-torch.dockerfile/0
{ "file_path": "transformers/docker/examples-torch.dockerfile", "repo_id": "transformers", "token_count": 277 }
0
# https://docs.nvidia.com/deeplearning/frameworks/pytorch-release-notes/rel-23-11.html#rel-23-11
FROM nvcr.io/nvidia/pytorch:23.04-py3
LABEL maintainer="Hugging Face"

ARG DEBIAN_FRONTEND=noninteractive

ARG PYTORCH='2.2.0'
# Example: `cu102`, `cu113`, etc.
ARG CUDA='cu121'

RUN apt -y update
RUN apt install -y libaio-dev
RUN python3 -m pip install --no-cache-dir --upgrade pip

ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF

RUN python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]

# Install latest release PyTorch
# (PyTorch must be installed before pre-compiling any DeepSpeed c++/cuda ops.)
# (https://www.deepspeed.ai/tutorials/advanced-install/#pre-install-deepspeed-ops)
RUN python3 -m pip uninstall -y torch torchvision torchaudio && python3 -m pip install --no-cache-dir -U torch==$PYTORCH torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/$CUDA

RUN python3 -m pip install --no-cache-dir git+https://github.com/huggingface/accelerate@main#egg=accelerate

# Uninstall `transformer-engine` shipped with the base image
RUN python3 -m pip uninstall -y transformer-engine

# Uninstall `torch-tensorrt` shipped with the base image
RUN python3 -m pip uninstall -y torch-tensorrt

# recompile apex
RUN python3 -m pip uninstall -y apex
# RUN git clone https://github.com/NVIDIA/apex
# `MAX_JOBS=1` disables parallel building to avoid cpu memory OOM when building image on GitHub Action (standard) runners
# TODO: check if there is alternative way to install latest apex
# RUN cd apex && MAX_JOBS=1 python3 -m pip install --global-option="--cpp_ext" --global-option="--cuda_ext" --no-cache -v --disable-pip-version-check .

# Pre-build **latest** DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
RUN python3 -m pip uninstall -y deepspeed
# This has to be run (again) inside the GPU VMs running the tests.
# The installation works here, but some tests fail, if we don't pre-build deepspeed again in the VMs running the tests.
# TODO: Find out why test fail.
RUN DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 python3 -m pip install deepspeed --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1

# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop

# The base image ships with `pydantic==1.8.2` which is not working - i.e. the next command fails
RUN python3 -m pip install -U --no-cache-dir "pydantic>=2.0.0"

RUN python3 -c "from deepspeed.launcher.runner import main"
transformers/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile/0
{ "file_path": "transformers/docker/transformers-pytorch-deepspeed-latest-gpu/Dockerfile", "repo_id": "transformers", "token_count": 895 }
1
# Model training anatomy

To understand the performance optimization techniques that can be applied to improve memory efficiency and training speed, it is helpful to get familiar with how the GPU is utilized during training, and how compute intensity varies depending on the operation being performed.

Let's start by exploring a motivating example of GPU utilization and a model training run. For the demonstration, we'll need to install a few libraries:

```bash
pip install transformers datasets accelerate nvidia-ml-py3
```

The `nvidia-ml-py3` library allows monitoring the memory usage of models from within Python. You might be familiar with the `nvidia-smi` command in the terminal - this library allows access to the same information directly in Python.

Then, we create some dummy data: random token IDs between 100 and 30000 and binary labels for a classifier. In total, we get 512 sequences each with length 512 and store them in a [`~datasets.Dataset`] with PyTorch format.

```py
>>> import numpy as np
>>> from datasets import Dataset

>>> seq_len, dataset_size = 512, 512
>>> dummy_data = {
...     "input_ids": np.random.randint(100, 30000, (dataset_size, seq_len)),
...     "labels": np.random.randint(0, 1, (dataset_size)),
... }
>>> ds = Dataset.from_dict(dummy_data)
>>> ds.set_format("pt")
```

To print summary statistics for GPU utilization and a training run with the [`Trainer`], we define two helper functions:

```py
>>> from pynvml import *

>>> def print_gpu_utilization():
...     nvmlInit()
...     handle = nvmlDeviceGetHandleByIndex(0)
...     info = nvmlDeviceGetMemoryInfo(handle)
...     print(f"GPU memory occupied: {info.used//1024**2} MB.")

>>> def print_summary(result):
...     print(f"Time: {result.metrics['train_runtime']:.2f}")
...     print(f"Samples/second: {result.metrics['train_samples_per_second']:.2f}")
...     print_gpu_utilization()
```

Let's verify that we start with free GPU memory:

```py
>>> print_gpu_utilization()
GPU memory occupied: 0 MB.
```

That looks good: the GPU memory is not occupied, as we would expect before loading any models. If that's not the case on your machine, make sure to stop all processes that are using GPU memory. However, not all free GPU memory can be used by the user. When a model is loaded to the GPU, the kernels are also loaded, which can take up 1-2GB of memory. To see how much it is, we load a tiny tensor into the GPU, which triggers the kernels to be loaded as well.

```py
>>> import torch

>>> torch.ones((1, 1)).to("cuda")
>>> print_gpu_utilization()
GPU memory occupied: 1343 MB.
```

We see that the kernels alone take up 1.3GB of GPU memory. Now let's see how much space the model uses.

## Load Model

First, we load the `google-bert/bert-large-uncased` model. We load the model weights directly to the GPU so that we can check how much space just the weights use.

```py
>>> from transformers import AutoModelForSequenceClassification

>>> model = AutoModelForSequenceClassification.from_pretrained("google-bert/bert-large-uncased").to("cuda")
>>> print_gpu_utilization()
GPU memory occupied: 2631 MB.
```

We can see that the model weights alone take up 1.3 GB of GPU memory. The exact number depends on the specific GPU you are using. Note that on newer GPUs a model can sometimes take up more space since the weights are loaded in an optimized fashion that speeds up the usage of the model.

Now we can also quickly check whether we get the same result as with the `nvidia-smi` CLI:

```bash
nvidia-smi
```

```bash
Tue Jan 11 08:58:05 2022
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 460.91.03    Driver Version: 460.91.03    CUDA Version: 11.2     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  Tesla V100-SXM2...  On   | 00000000:00:04.0 Off |                    0 |
| N/A   37C    P0    39W / 300W |   2631MiB / 16160MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A      3721      C   ...nvs/codeparrot/bin/python    2629MiB  |
+-----------------------------------------------------------------------------+
```

We get the same number as before, and you can also see that we are using a V100 GPU with 16GB of memory. So now we can start training the model and see how the GPU memory consumption changes. First, we set up some standard training arguments:

```py
default_args = {
    "output_dir": "tmp",
    "eval_strategy": "steps",
    "num_train_epochs": 1,
    "log_level": "error",
    "report_to": "none",
}
```

<Tip>

If you plan to run multiple experiments, restart the Python kernel between experiments in order to properly clear the memory.

</Tip>

## Memory utilization at vanilla training

Let's use the [`Trainer`] and train the model without using any GPU performance optimization techniques and a batch size of 4:

```py
>>> from transformers import TrainingArguments, Trainer, logging

>>> logging.set_verbosity_error()

>>> training_args = TrainingArguments(per_device_train_batch_size=4, **default_args)
>>> trainer = Trainer(model=model, args=training_args, train_dataset=ds)
>>> result = trainer.train()
>>> print_summary(result)
```

```
Time: 57.82
Samples/second: 8.86
GPU memory occupied: 14949 MB.
```

We can see that already a relatively small batch size almost fills up the GPU's entire memory. However, a larger batch size can often result in faster model convergence or better end performance. So ideally we want to tune the batch size to our model's needs and not to the GPU limitations. What's interesting is that we use much more memory than the size of the model. To understand why this is the case, let's have a look at a model's operations and memory needs.

## Anatomy of Model's Operations

The Transformers architecture includes 3 main groups of operations, grouped below by compute intensity.

1. **Matrix multiplications**

    Linear layers and components of multi-head attention all perform batched **matrix-matrix multiplications**. These operations are the most compute-intensive part of training a transformer.

2. **Statistical normalizations**

    Softmax and layer normalization are less compute-intensive than matrix multiplications, and involve one or more **reduction** operations, the result of which is then applied via a map.

3. **Element-wise operators**

    These are the remaining operations: **biases, dropout, activation functions, and residual connections**. These are the least compute-intensive operations.

This knowledge can be helpful when analyzing performance bottlenecks.

This summary is derived from [Data Movement Is All You Need: A Case Study on Optimizing Transformers 2020](https://arxiv.org/abs/2007.00072)

## Anatomy of Model's Memory

We've seen that training a model uses much more memory than just putting the model on the GPU. This is because there are many components during training that use GPU memory. The components on GPU memory are the following:

1. model weights
2. optimizer states
3. gradients
4. forward activations saved for gradient computation
5. temporary buffers
6. functionality-specific memory

A typical model trained in mixed precision with the AdamW optimizer requires 18 bytes per model parameter plus activation memory. For inference there are no optimizer states and gradients, so we can subtract those. We thus end up with 6 bytes per model parameter for mixed precision inference, plus activation memory.

Let's look at the details.

**Model Weights:**

- 4 bytes * number of parameters for fp32 training
- 6 bytes * number of parameters for mixed precision training (maintains one model in fp32 and one in fp16 in memory)

**Optimizer States:**

- 8 bytes * number of parameters for the normal AdamW optimizer (maintains 2 states)
- 2 bytes * number of parameters for 8-bit AdamW optimizers like [bitsandbytes](https://github.com/TimDettmers/bitsandbytes)
- 4 bytes * number of parameters for optimizers like SGD with momentum (maintains only 1 state)

**Gradients**

- 4 bytes * number of parameters for either fp32 or mixed precision training (gradients are always kept in fp32)

**Forward Activations**

- size depends on many factors, the key ones being sequence length, hidden size, and batch size.

There are the inputs and outputs that are being passed and returned by the forward and backward functions, and the forward activations saved for gradient computation.

**Temporary Memory**

Additionally, there are all kinds of temporary variables which are released once the computation is done, but at any given moment these temporary variables can require additional memory and could push the run to OOM. Therefore, when coding, it is important to think strategically about such temporary variables and sometimes to explicitly free them as soon as they are no longer needed.

**Functionality-specific memory**

Then, your software could have special memory needs. For example, when generating text using beam search, the software needs to maintain multiple copies of inputs and outputs.

**`forward` vs `backward` Execution Speed**

For convolutions and linear layers there are 2x flops in the backward pass compared to the forward pass, which generally translates into ~2x slower execution (sometimes more, because sizes in the backward pass tend to be more awkward). Activations are usually bandwidth-limited, and it is typical for an activation to have to read more data in the backward pass than in the forward pass (e.g. the activation forward reads once and writes once, while the activation backward reads twice, gradOutput and the output of the forward, and writes once, gradInput).

As you can see, there are a few places where we could save GPU memory or speed up operations. Now that you understand what affects GPU utilization and computation speed, refer to the [Methods and tools for efficient training on a single GPU](perf_train_gpu_one) documentation page to learn more about performance optimization techniques.
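To make the byte-per-parameter breakdown above concrete, here is a small, hypothetical helper (not part of the original guide) that estimates the static training memory under the stated assumptions: mixed-precision training with vanilla AdamW, i.e. 6 bytes of weights, 8 bytes of optimizer state, and 4 bytes of gradients per parameter, ignoring activations, temporary buffers, and functionality-specific memory.

```py
# Rough static-memory estimate based on the per-parameter costs listed above.
# Assumptions: mixed-precision training with vanilla AdamW; activation memory,
# temporary buffers, and functionality-specific memory are NOT included.
def estimate_training_memory_gb(num_parameters: int) -> float:
    bytes_per_param = 6 + 8 + 4  # fp32+fp16 weights, AdamW states, fp32 gradients
    return num_parameters * bytes_per_param / 1024**3

# Example: BERT-large has roughly 336M parameters.
print(f"~{estimate_training_memory_gb(336_000_000):.1f} GB before activations")
```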
transformers/docs/source/ar/model_memory_anatomy.md/0
{ "file_path": "transformers/docs/source/ar/model_memory_anatomy.md", "repo_id": "transformers", "token_count": 8004 }
2
# Export to ONNX

Deploying 🤗 Transformers models in production environments often requires, or can benefit from, exporting the models into a serialized format that can be loaded and executed on specialized runtimes and hardware.

🤗 Optimum is an extension of Transformers that enables exporting models from PyTorch or TensorFlow to serialized formats such as ONNX and TFLite through its `exporters` module. 🤗 Optimum also provides a set of performance optimization tools to train and run models on targeted hardware with maximum efficiency.

This guide demonstrates how to export 🤗 Transformers models to ONNX with 🤗 Optimum; for the guide on exporting models to TFLite, please refer to the [Export to TFLite page](tflite).

## Export to ONNX

[ONNX (Open Neural Network Exchange)](http://onnx.ai) is an open standard that defines a common set of operators and a common file format to represent deep learning models in a wide variety of frameworks, including PyTorch and TensorFlow. When a model is exported to the ONNX format, these operators are used to construct a computational graph (often called an _intermediate representation_) which represents the flow of data through the neural network.

By exposing a graph with standardized operators and data types, ONNX makes it easy to switch between frameworks. For example, a model trained in PyTorch can be exported to ONNX format and then imported in TensorFlow (and vice versa).

Once exported to the ONNX format, a model can be:

- optimized for inference via techniques such as [graph optimization](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/optimization) and [quantization](https://huggingface.co/docs/optimum/onnxruntime/usage_guides/quantization).
- run with ONNX Runtime via the [`ORTModelForXXX` classes](https://huggingface.co/docs/optimum/onnxruntime/package_reference/modeling_ort), which follow the same `AutoModel` API you are used to in 🤗 Transformers.
- run with [optimized inference pipelines](https://huggingface.co/docs/optimum/main/en/onnxruntime/usage_guides/pipelines), which have the same API as the [`pipeline`] function in 🤗 Transformers.

🤗 Optimum provides support for the ONNX export by leveraging configuration objects. These configuration objects come ready-made for a number of model architectures, and are designed to be easily extendable to other architectures. For the list of ready-made configurations, please refer to the [🤗 Optimum documentation](https://huggingface.co/docs/optimum/exporters/onnx/overview).

There are two ways to export a 🤗 Transformers model to ONNX, and we show both here:

- export with 🤗 Optimum via the command-line interface (CLI).
- export with 🤗 Optimum with `optimum.onnxruntime`.

### Exporting a 🤗 Transformers model to ONNX with the CLI

To export a 🤗 Transformers model to ONNX, first install an extra dependency:

```bash
pip install optimum[exporters]
```

To check out all available arguments, refer to the [🤗 Optimum docs](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli), or view the help in the command line:

```bash
optimum-cli export onnx --help
```

To export a model checkpoint from the 🤗 Hub, for example `distilbert/distilbert-base-uncased-distilled-squad`, run the following command:

```bash
optimum-cli export onnx --model distilbert/distilbert-base-uncased-distilled-squad distilbert_base_uncased_squad_onnx/
```

You should see logs indicating progress and showing where the resulting `model.onnx` is saved, like this:

```bash
Validating ONNX model distilbert_base_uncased_squad_onnx/model.onnx...
	-[✓] ONNX model output names match reference model (start_logits, end_logits)
	- Validating ONNX Model output "start_logits":
		-[✓] (2, 16) matches (2, 16)
		-[✓] all values close (atol: 0.0001)
	- Validating ONNX Model output "end_logits":
		-[✓] (2, 16) matches (2, 16)
		-[✓] all values close (atol: 0.0001)
The ONNX export succeeded and the exported model was saved at: distilbert_base_uncased_squad_onnx
```

The example above illustrates exporting a checkpoint from the 🤗 Hub. When exporting a local model, first make sure you saved both the model's weight and tokenizer files in the same directory (`local_path`). When using the CLI, pass `local_path` to the `model` argument instead of a checkpoint name on the 🤗 Hub and provide the `--task` argument. You can review the list of supported tasks in the [🤗 Optimum documentation](https://huggingface.co/docs/optimum/exporters/task_manager). If the `task` argument is not provided, it defaults to the model architecture without any task-specific head.

```bash
optimum-cli export onnx --model local_path --task question-answering distilbert_base_uncased_squad_onnx/
```

The resulting `model.onnx` file can then be run on one of the [many accelerators](https://onnx.ai/supported-tools.html#deployModel) that support the ONNX standard. For example, we can load and run the model with [ONNX Runtime](https://onnxruntime.ai/) as follows:

```python
>>> from transformers import AutoTokenizer
>>> from optimum.onnxruntime import ORTModelForQuestionAnswering

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert_base_uncased_squad_onnx")
>>> model = ORTModelForQuestionAnswering.from_pretrained("distilbert_base_uncased_squad_onnx")
>>> inputs = tokenizer("What am I using?", "Using DistilBERT with ONNX Runtime!", return_tensors="pt")
>>> outputs = model(**inputs)
```

The process is identical for TensorFlow checkpoints on the Hub. For example, here is how to export a pure TensorFlow checkpoint from the [Keras organization](https://huggingface.co/keras-io):

```bash
optimum-cli export onnx --model keras-io/transformers-qa distilbert_base_cased_squad_onnx/
```

### Exporting a 🤗 Transformers model to ONNX with `optimum.onnxruntime`

As an alternative to the CLI, you can export a 🤗 Transformers model to ONNX programmatically like so:

```python
>>> from optimum.onnxruntime import ORTModelForSequenceClassification
>>> from transformers import AutoTokenizer

>>> model_checkpoint = "distilbert_base_uncased_squad"
>>> save_directory = "onnx/"

>>> # Load a model from transformers and export it to ONNX
>>> ort_model = ORTModelForSequenceClassification.from_pretrained(model_checkpoint, export=True)
>>> tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

>>> # Save the onnx model and tokenizer
>>> ort_model.save_pretrained(save_directory)
>>> tokenizer.save_pretrained(save_directory)
```

### Exporting a model for an unsupported architecture

If you wish to contribute by adding support for a model that cannot currently be exported, you should first check whether it is supported in [`optimum.exporters.onnx`](https://huggingface.co/docs/optimum/exporters/onnx/overview), and if it is not, you can [contribute to 🤗 Optimum](https://huggingface.co/docs/optimum/exporters/onnx/usage_guides/contribute) directly.

### Exporting a model with `transformers.onnx`

<Tip warning={true}>

`transformers.onnx` is no longer maintained; please export models with 🤗 Optimum as described above. This section will be removed in future versions.

</Tip>

To export a 🤗 Transformers model to ONNX with `transformers.onnx`, install the extra dependencies:

```bash
pip install transformers[onnx]
```

Use the `transformers.onnx` package as a Python module to export a checkpoint using a ready-made configuration:

```bash
python -m transformers.onnx --model=distilbert/distilbert-base-uncased onnx/
```

This exports an ONNX graph of the checkpoint defined by the `--model` argument. Pass any checkpoint on the 🤗 Hub or one that is stored locally. The resulting `model.onnx` file can then be run on one of the many accelerators that support the ONNX standard. For example, load and run the model with ONNX Runtime as follows:

```python
>>> from transformers import AutoTokenizer
>>> from onnxruntime import InferenceSession

>>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")
>>> session = InferenceSession("onnx/model.onnx")
>>> # ONNX Runtime expects NumPy arrays as input
>>> inputs = tokenizer("Using DistilBERT with ONNX Runtime!", return_tensors="np")
>>> outputs = session.run(output_names=["last_hidden_state"], input_feed=dict(inputs))
```

The required output names (like `["last_hidden_state"]`) can be obtained by taking a look at the ONNX configuration of each model. For example, for DistilBERT we have:

```python
>>> from transformers.models.distilbert import DistilBertConfig, DistilBertOnnxConfig

>>> config = DistilBertConfig()
>>> onnx_config = DistilBertOnnxConfig(config)
>>> print(list(onnx_config.outputs.keys()))
["last_hidden_state"]
```

The process is identical for TensorFlow checkpoints on the Hub. For example, export a pure TensorFlow checkpoint like so:

```bash
python -m transformers.onnx --model=keras-io/transformers-qa onnx/
```

To export a model that is stored locally, save the model's weights and tokenizer files in the same directory (e.g. `local-pt-checkpoint`), then export it to ONNX by pointing the `--model` argument of the `transformers.onnx` package to the desired directory:

```bash
python -m transformers.onnx --model=local-pt-checkpoint onnx/
```
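As a quick sanity check after exporting, you may want to confirm that the exported graph reproduces the original PyTorch model's outputs. The snippet below is a sketch of such a check, assuming the `distilbert_base_uncased_squad_onnx/` directory produced by the CLI example above; the `atol` tolerance is an illustrative choice, not a value prescribed by the docs.

```python
import numpy as np
import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
from optimum.onnxruntime import ORTModelForQuestionAnswering

checkpoint = "distilbert/distilbert-base-uncased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Reference PyTorch model and the exported ONNX model (see the CLI export above).
pt_model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)
ort_model = ORTModelForQuestionAnswering.from_pretrained("distilbert_base_uncased_squad_onnx")

inputs = tokenizer("What am I using?", "Using DistilBERT with ONNX Runtime!", return_tensors="pt")
with torch.no_grad():
    pt_outputs = pt_model(**inputs)
ort_outputs = ort_model(**inputs)

# The exported graph should reproduce the reference logits within a small tolerance.
print(np.allclose(pt_outputs.start_logits.numpy(), ort_outputs.start_logits.numpy(), atol=1e-4))
```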
transformers/docs/source/ar/serialization.md/0
{ "file_path": "transformers/docs/source/ar/serialization.md", "repo_id": "transformers", "token_count": 5878 }
3
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Preprocess

[[open-in-colab]]

Before you can use your data in a model, the data needs to be processed into a format the model accepts. A model does not understand raw text, images, or audio. These inputs need to be converted into numbers and assembled into tensors. In this guide, you will:

* Preprocess text data with a tokenizer.
* Preprocess image or audio data with a feature extractor.
* Preprocess data for a multimodal task with a processor.

## NLP

<Youtube id="Yffk5aydLzg"/>

The main tool for processing text data is a [tokenizer](main_classes/tokenizer). A tokenizer first splits text into *tokens* according to a set of rules. The tokens are converted into numbers, which are used to build tensors as input to a model. Any additional inputs required by a model are also added by the tokenizer.

<Tip>

If you plan on using a pretrained model, it is important to use the associated pretrained tokenizer. This ensures the text is split the same way as the pretraining corpus, and uses the same corresponding tokens-to-index mapping (usually referred to as the *vocab*) as during pretraining.

</Tip>

Get started quickly by loading a pretrained tokenizer with the [`AutoTokenizer`] class. This downloads the *vocab* used when a model is pretrained.

### Tokenize

Load a pretrained tokenizer with [`AutoTokenizer.from_pretrained`]:

```py
>>> from transformers import AutoTokenizer

>>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
```

Then pass your sentence to the tokenizer:

```py
>>> encoded_input = tokenizer("Do not meddle in the affairs of wizards, for they are subtle and quick to anger.")
>>> print(encoded_input)
{'input_ids': [101, 2079, 2025, 19960, 10362, 1999, 1996, 3821, 1997, 16657, 1010, 2005, 2027, 2024, 11259, 1998, 4248, 2000, 4963, 1012, 102],
 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]}
```

The tokenizer returns a dictionary with three important items:

* [input_ids](glossary#input-ids) are the indices corresponding to each token in the sentence.
* [attention_mask](glossary#attention-mask) indicates whether a token should be attended to or not.
* [token_type_ids](glossary#token-type-ids) identifies which sequence a token belongs to when there is more than one sequence.

You can decode the `input_ids` to return the original input:

```py
>>> tokenizer.decode(encoded_input["input_ids"])
'[CLS] Do not meddle in the affairs of wizards, for they are subtle and quick to anger. [SEP]'
```

As you can see, the tokenizer added two special tokens - `CLS` and `SEP` (classifier and separator) - to the sentence. Not all models need special tokens, but if they do, the tokenizer automatically adds them for you.

If you want to process several sentences, pass the sentences as a list to the tokenizer:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_inputs = tokenizer(batch_sentences)
>>> print(encoded_inputs)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1]]}
```

### Pad

This brings us to an important topic. When you process a batch of sentences, they are not always the same length. This is a problem because tensors, the input to the model, need to have a uniform shape. Padding is a strategy that ensures tensors are rectangular by adding a special *padding token* to sentences with fewer tokens.

Set the `padding` parameter to `True` to pad the shorter sequences in the batch to match the longest sequence:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```

Notice the tokenizer padded the first and third sentences with a `0` because they are shorter!

### Truncation

On the other end of the spectrum, a sequence may sometimes be too long for a model. In this case, you need to truncate the sequence to a shorter length.

Set the `truncation` parameter to `True` to truncate a sequence to the maximum length accepted by the model:

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True)
>>> print(encoded_input)
{'input_ids': [[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
               [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
               [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]],
 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                    [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]]}
```

### Build tensors

Finally, you want the tokenizer to return the actual tensors that get fed to the model.

Set the `return_tensors` parameter to either `pt` for PyTorch, or `tf` for TensorFlow:

<frameworkcontent>
<pt>

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="pt")
>>> print(encoded_input)
{'input_ids': tensor([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
                      [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
                      [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]]),
 'token_type_ids': tensor([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                           [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]),
 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                           [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
                           [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]])}
```
</pt>
<tf>

```py
>>> batch_sentences = [
...     "But what about second breakfast?",
...     "Don't think he knows about second breakfast, Pip.",
...     "What about elevensies?",
... ]
>>> encoded_input = tokenizer(batch_sentences, padding=True, truncation=True, return_tensors="tf")
>>> print(encoded_input)
{'input_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[101, 1252, 1184, 1164, 1248, 6462, 136, 102, 0, 0, 0, 0, 0, 0, 0],
       [101, 1790, 112, 189, 1341, 1119, 3520, 1164, 1248, 6462, 117, 21902, 1643, 119, 102],
       [101, 1327, 1164, 5450, 23434, 136, 102, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>,
 'token_type_ids': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
       [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>,
 'attention_mask': <tf.Tensor: shape=(2, 9), dtype=int32, numpy=
array([[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
       [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0]], dtype=int32)>}
```
</tf>
</frameworkcontent>

## Audio

Audio inputs are preprocessed differently than text inputs, but the end goal remains the same: create numerical sequences the model can understand. A [feature extractor](main_classes/feature_extractor) is designed for the express purpose of extracting features from raw image or audio data and converting them into tensors.

Before you begin, install 🤗 Datasets to load an audio dataset to experiment with:

```bash
pip install datasets
```

Load the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset (see the 🤗 [Datasets tutorial](https://huggingface.co/docs/datasets/load_hub) for more details on how to load a dataset):

```py
>>> from datasets import load_dataset, Audio

>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
```

Access the first element of the `audio` column to take a look at the input. Calling the `audio` column automatically loads and resamples the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

This returns three items:

* `array` is the speech signal loaded - and potentially resampled - as a 1D array.
* `path` points to the location of the audio file.
* `sampling_rate` refers to how many data points in the speech signal are measured per second.

### Resample

For this tutorial, you will use the [Wav2Vec2](https://huggingface.co/facebook/wav2vec2-base) model. As you can see from the model card, the Wav2Vec2 model is pretrained on 16kHz sampled speech audio. It is important that your audio data's sampling rate matches the sampling rate of the dataset used to pretrain the model. If your data's sampling rate is not the same, you need to resample your audio data.

For example, the [MInDS-14](https://huggingface.co/datasets/PolyAI/minds14) dataset has a sampling rate of 8000 Hz. In order to use the Wav2Vec2 model with this dataset, you need to upsample it to 16 kHz:

```py
>>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train")
>>> dataset[0]["audio"]
{'array': array([ 0.        ,  0.00024414, -0.00024414, ..., -0.00024414,
         0.        ,  0.        ], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 8000}
```

1. Use 🤗 Datasets' [`~datasets.Dataset.cast_column`] method to upsample the sampling rate to 16kHz:

```py
>>> dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
```

2. Load the audio file:

```py
>>> dataset[0]["audio"]
{'array': array([ 2.3443763e-05,  2.1729663e-04,  2.2145823e-04, ...,
         3.8356509e-05, -7.3497440e-06, -2.1754686e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/f14948e0e84be638dd7943ac36518a4cf3324e8b7aa331c5ab11541518e9368c/en-US~JOINT_ACCOUNT/602ba55abb1e6d0fbce92065.wav',
 'sampling_rate': 16000}
```

As you can see, the `sampling_rate` is now 16kHz!

### Feature extractor

The next step is to load a feature extractor to normalize and pad the input. When padding text data, a `0` is added for shorter sequences. The same idea applies to audio data: the audio feature extractor adds a `0` - interpreted as silence - to `array`.

Load the feature extractor with [`AutoFeatureExtractor.from_pretrained`]:

```py
>>> from transformers import AutoFeatureExtractor

>>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base")
```

Pass the audio `array` to the feature extractor. We also recommend adding the `sampling_rate` argument in the feature extractor in order to better debug any silent errors that may occur.

```py
>>> audio_input = [dataset[0]["audio"]["array"]]
>>> feature_extractor(audio_input, sampling_rate=16000)
{'input_values': [array([ 3.8106556e-04,  2.7506407e-03,  2.8015103e-03, ...,
        5.6335266e-04,  4.6588284e-06, -1.7142107e-04], dtype=float32)]}
```

### Pad and truncate

Just like with the tokenizer, you can apply padding or truncation to handle variable sequences in a batch. Take a look at the sequence length of these two audio samples:

```py
>>> dataset[0]["audio"]["array"].shape
(173398,)

>>> dataset[1]["audio"]["array"].shape
(106496,)
```

As you can see, the first sample has a longer sequence than the second sample. Let's create a function that will preprocess the dataset. Specify a maximum sample length, and the feature extractor will either pad or truncate the sequences to match it:

```py
>>> def preprocess_function(examples):
...     audio_arrays = [x["array"] for x in examples["audio"]]
...     inputs = feature_extractor(
...         audio_arrays,
...         sampling_rate=16000,
...         padding=True,
...         max_length=100000,
...         truncation=True,
...     )
...     return inputs
```

Apply the function to the first few examples in the dataset:

```py
>>> processed_dataset = preprocess_function(dataset[:5])
```

Now take another look at the processed sample lengths:

```py
>>> processed_dataset["input_values"][0].shape
(100000,)

>>> processed_dataset["input_values"][1].shape
(100000,)
```

The lengths of the first two samples now match the maximum length you specified.

## Computer vision

A feature extractor is also used to process images for computer vision tasks. Once again, the goal is to convert the raw image into a batch of tensors as input.

Let's load the [food101](https://huggingface.co/datasets/food101) dataset for this tutorial. Use the 🤗 Datasets `split` parameter to only load a small sample from the training split since the dataset is quite large:

```py
>>> from datasets import load_dataset

>>> dataset = load_dataset("food101", split="train[:100]")
```

Next, take a look at the image with the 🤗 Datasets [Image](https://huggingface.co/docs/datasets/package_reference/main_classes?highlight=image#datasets.Image) feature:

```py
>>> dataset[0]["image"]
```

![vision-preprocess-tutorial.png](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/vision-preprocess-tutorial.png)

### Feature extractor

Load the feature extractor with [`AutoImageProcessor.from_pretrained`]:

```py
>>> from transformers import AutoImageProcessor

>>> image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
```

### Data augmentation

For computer vision tasks, it is common to add some type of data augmentation to the images as part of preprocessing. You can add augmentations with any library you'd like, but in this tutorial you will use torchvision's [`transforms`](https://pytorch.org/vision/stable/transforms.html) module.

1. Normalize the image and use [`Compose`](https://pytorch.org/vision/master/generated/torchvision.transforms.Compose.html) to chain some transforms - [`RandomResizedCrop`](https://pytorch.org/vision/main/generated/torchvision.transforms.RandomResizedCrop.html) and [`ColorJitter`](https://pytorch.org/vision/main/generated/torchvision.transforms.ColorJitter.html) - together:

```py
>>> from torchvision.transforms import Compose, Normalize, RandomResizedCrop, ColorJitter, ToTensor

>>> normalize = Normalize(mean=image_processor.image_mean, std=image_processor.image_std)
>>> _transforms = Compose(
...     [RandomResizedCrop(image_processor.size["height"]), ColorJitter(brightness=0.5, hue=0.5), ToTensor(), normalize]
... )
```

2. The model accepts [`pixel_values`](model_doc/visionencoderdecoder#transformers.VisionEncoderDecoderModel.forward.pixel_values) as its input. This value is generated by the feature extractor. Create a function that generates `pixel_values` from the transforms:

```py
>>> def transforms(examples):
...     examples["pixel_values"] = [_transforms(image.convert("RGB")) for image in examples["image"]]
...     return examples
```

3. Then use 🤗 Datasets [`set_transform`](https://huggingface.co/docs/datasets/process#format-transform) to apply the transforms on the fly:

```py
>>> dataset.set_transform(transforms)
```

4. Now when you access the image, you will notice the feature extractor has added the model input `pixel_values`:

```py
>>> dataset[0]["image"]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=384x512 at 0x7F1A7B0630D0>,
 'label': 6,
 'pixel_values': tensor([[[ 0.0353, 0.0745, 0.1216, ..., -0.9922, -0.9922, -0.9922],
          [-0.0196, 0.0667, 0.1294, ..., -0.9765, -0.9843, -0.9922],
          [ 0.0196, 0.0824, 0.1137, ..., -0.9765, -0.9686, -0.8667],
          ...,
          [ 0.0275, 0.0745, 0.0510, ..., -0.1137, -0.1216, -0.0824],
          [ 0.0667, 0.0824, 0.0667, ..., -0.0588, -0.0745, -0.0980],
          [ 0.0353, 0.0353, 0.0431, ..., -0.0039, -0.0039, -0.0588]],

         [[ 0.2078, 0.2471, 0.2863, ..., -0.9451, -0.9373, -0.9451],
          [ 0.1608, 0.2471, 0.3098, ..., -0.9373, -0.9451, -0.9373],
          [ 0.2078, 0.2706, 0.3020, ..., -0.9608, -0.9373, -0.8275],
          ...,
          [-0.0353, 0.0118, -0.0039, ..., -0.2392, -0.2471, -0.2078],
          [ 0.0196, 0.0353, 0.0196, ..., -0.1843, -0.2000, -0.2235],
          [-0.0118, -0.0039, -0.0039, ..., -0.0980, -0.0980, -0.1529]],

         [[ 0.3961, 0.4431, 0.4980, ..., -0.9216, -0.9137, -0.9216],
          [ 0.3569, 0.4510, 0.5216, ..., -0.9059, -0.9137, -0.9137],
          [ 0.4118, 0.4745, 0.5216, ..., -0.9137, -0.8902, -0.7804],
          ...,
          [-0.2314, -0.1922, -0.2078, ..., -0.4196, -0.4275, -0.3882],
          [-0.1843, -0.1686, -0.2000, ..., -0.3647, -0.3804, -0.4039],
          [-0.1922, -0.1922, -0.1922, ..., -0.2941, -0.2863, -0.3412]]])}
```

Here is how the image looks after preprocessing. Just as you would expect from the applied transforms, the image has been randomly cropped and its color properties are different.

```py
>>> import numpy as np
>>> import matplotlib.pyplot as plt

>>> img = dataset[0]["pixel_values"]
>>> plt.imshow(img.permute(1, 2, 0))
```

![preprocessed_image](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/preprocessed_image.png)

## Multimodal

For multimodal tasks, you will use a combination of everything you have learned so far and apply your skills to an automatic speech recognition (ASR) task.

This means you will need a:

* Feature extractor to preprocess the audio data.
* Tokenizer to process the text.

Let's return to the [LJ Speech](https://huggingface.co/datasets/lj_speech) dataset:

```py
>>> from datasets import load_dataset

>>> lj_speech = load_dataset("lj_speech", split="train")
```

Since you are mainly interested in the `audio` and `text` columns, remove the other columns:

```py
>>> lj_speech = lj_speech.map(remove_columns=["file", "id", "normalized_text"])
```

Now take a look at the `audio` and `text` columns:

```py
>>> lj_speech[0]["audio"]
{'array': array([-7.3242188e-04, -7.6293945e-04, -6.4086914e-04, ...,
         7.3242188e-04,  2.1362305e-04,  6.1035156e-05], dtype=float32),
 'path': '/root/.cache/huggingface/datasets/downloads/extracted/917ece08c95cf0c4115e45294e3cd0dee724a1165b7fc11798369308a465bd26/LJSpeech-1.1/wavs/LJ001-0001.wav',
 'sampling_rate': 22050}

>>> lj_speech[0]["text"]
'Printing, in the only sense with which we are at present concerned, differs from most if not from all the arts and crafts represented in the Exhibition'
```

Remember from the earlier section on processing audio data: you should always [resample](preprocessing#audio) your audio data's sampling rate to match the sampling rate of the dataset used to pretrain a model:

```py
>>> lj_speech = lj_speech.cast_column("audio", Audio(sampling_rate=16_000))
```

### Processor

A processor combines a feature extractor and a tokenizer. Load a processor with [`AutoProcessor.from_pretrained`]:

```py
>>> from transformers import AutoProcessor

>>> processor = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h")
```

1. Create a function that processes the audio data to `input_values` and tokenizes the text to `labels`. These are your inputs to the model:

```py
>>> def prepare_dataset(example):
...     audio = example["audio"]
...     example.update(processor(audio=audio["array"], text=example["text"], sampling_rate=16000))
...     return example
```

2. Apply the `prepare_dataset` function to a sample:

```py
>>> prepare_dataset(lj_speech[0])
```

Notice the processor has added `input_values` and `labels`. The sampling rate has also been correctly downsampled to 16kHz.

Great, you should now be able to preprocess data for any modality and even combine different modalities! In the next tutorial, learn how to fine-tune a model on your newly preprocessed data.
transformers/docs/source/de/preprocessing.md/0
{ "file_path": "transformers/docs/source/de/preprocessing.md", "repo_id": "transformers", "token_count": 10560 }
4
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# BERTology

There is a growing field of study concerned with investigating the inner workings of large-scale transformers like BERT (that some call "BERTology"). Some good examples of this field are:

- BERT Rediscovers the Classical NLP Pipeline by Ian Tenney, Dipanjan Das, Ellie Pavlick: https://arxiv.org/abs/1905.05950
- Are Sixteen Heads Really Better than One? by Paul Michel, Omer Levy, Graham Neubig: https://arxiv.org/abs/1905.10650
- What Does BERT Look At? An Analysis of BERT's Attention by Kevin Clark, Urvashi Khandelwal, Omer Levy, Christopher D. Manning: https://arxiv.org/abs/1906.04341
- CAT-probing: A Metric-based Approach to Interpret How Pre-trained Models for Programming Language Attend Code Structure: https://arxiv.org/abs/2210.04633

In order to help this new field develop, we have included a few additional features in the BERT/GPT/GPT-2 models to help people access the inner representations, mainly adapted from the great work of Paul Michel (https://arxiv.org/abs/1905.10650):

- accessing all the hidden states of BERT/GPT/GPT-2,
- accessing all the attention weights for each head of BERT/GPT/GPT-2,
- retrieving the heads' output values and gradients to be able to compute the head importance score and prune heads as explained in https://arxiv.org/abs/1905.10650.

To help you understand and use these features, we have added a specific example script, [bertology.py](https://github.com/huggingface/transformers/tree/main/examples/research_projects/bertology/run_bertology.py), which extracts information from and prunes a model pre-trained on GLUE.
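As a brief illustration of the first two features (not part of the original document), hidden states and attention weights can be requested directly from a forward pass; the checkpoint below is only an example.

```python
import torch
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
# Ask the model to return all hidden states and per-head attention weights.
model = AutoModel.from_pretrained(
    "google-bert/bert-base-uncased", output_hidden_states=True, output_attentions=True
)

inputs = tokenizer("BERTology studies the inner workings of BERT.", return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

print(len(outputs.hidden_states))   # embeddings output + one tensor per layer
print(outputs.attentions[0].shape)  # (batch, num_heads, seq_len, seq_len)
```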
transformers/docs/source/en/bertology.md/0
{ "file_path": "transformers/docs/source/en/bertology.md", "repo_id": "transformers", "token_count": 640 }
5
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Hyperparameter Search using Trainer API

🤗 Transformers provides a [`Trainer`] class optimized for training 🤗 Transformers models, making it easier to start training without manually writing your own training loop. The [`Trainer`] also provides an API for hyperparameter search. This doc shows how to enable it with an example.

## Hyperparameter Search backend

[`Trainer`] currently supports four hyperparameter search backends: [optuna](https://optuna.org/), [sigopt](https://sigopt.com/), [raytune](https://docs.ray.io/en/latest/tune/index.html) and [wandb](https://wandb.ai/site/sweeps).

Install the backend you want to use before using it for hyperparameter search:

```bash
pip install optuna/sigopt/wandb/ray[tune]
```

## How to enable Hyperparameter search in example

Define the hyperparameter search space; different backends need different formats.

For sigopt, see the sigopt [object_parameter](https://docs.sigopt.com/ai-module-api-references/api_reference/objects/object_parameter) docs; the search space looks like the following:

```py
>>> def sigopt_hp_space(trial):
...     return [
...         {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double"},
...         {
...             "categorical_values": ["16", "32", "64", "128"],
...             "name": "per_device_train_batch_size",
...             "type": "categorical",
...         },
...     ]
```

For optuna, see the optuna [object_parameter](https://optuna.readthedocs.io/en/stable/tutorial/10_key_features/002_configurations.html#sphx-glr-tutorial-10-key-features-002-configurations-py) docs; the search space looks like the following:

```py
>>> def optuna_hp_space(trial):
...     return {
...         "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
...         "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [16, 32, 64, 128]),
...     }
```

Optuna provides multi-objective HPO. You can pass `direction` in `hyperparameter_search` and define your own `compute_objective` to return multiple objective values. The Pareto front (`List[BestRun]`) will be returned by `hyperparameter_search`; you should refer to the test case `TrainerHyperParameterMultiObjectOptunaIntegrationTest` in [test_trainer](https://github.com/huggingface/transformers/blob/main/tests/trainer/test_trainer.py). It looks like the following:

```py
>>> best_trials = trainer.hyperparameter_search(
...     direction=["minimize", "maximize"],
...     backend="optuna",
...     hp_space=optuna_hp_space,
...     n_trials=20,
...     compute_objective=compute_objective,
... )
```

For raytune, see the raytune [object_parameter](https://docs.ray.io/en/latest/tune/api/search_space.html) docs; the search space looks like the following:

```py
>>> def ray_hp_space(trial):
...     return {
...         "learning_rate": tune.loguniform(1e-6, 1e-4),
...         "per_device_train_batch_size": tune.choice([16, 32, 64, 128]),
...     }
```

For wandb, see the wandb [object_parameter](https://docs.wandb.ai/guides/sweeps/configuration) docs; the search space looks like the following:

```py
>>> def wandb_hp_space(trial):
...     return {
...         "method": "random",
...         "metric": {"name": "objective", "goal": "minimize"},
...         "parameters": {
...             "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
...             "per_device_train_batch_size": {"values": [16, 32, 64, 128]},
...         },
...     }
```

Define a `model_init` function and pass it to the [`Trainer`], for example:

```py
>>> def model_init(trial):
...     return AutoModelForSequenceClassification.from_pretrained(
...         model_args.model_name_or_path,
...         from_tf=bool(".ckpt" in model_args.model_name_or_path),
...         config=config,
...         cache_dir=model_args.cache_dir,
...         revision=model_args.model_revision,
...         token=True if model_args.use_auth_token else None,
...     )
```

Create a [`Trainer`] with your `model_init` function, training arguments, training and test datasets, and evaluation function:

```py
>>> trainer = Trainer(
...     model=None,
...     args=training_args,
...     train_dataset=small_train_dataset,
...     eval_dataset=small_eval_dataset,
...     compute_metrics=compute_metrics,
...     processing_class=tokenizer,
...     model_init=model_init,
...     data_collator=data_collator,
... )
```

Call hyperparameter search to get the best trial parameters. The backend can be `"optuna"`/`"sigopt"`/`"wandb"`/`"ray"`, and `direction` can be `"minimize"` or `"maximize"`, which indicates whether to minimize or maximize the objective. You can define your own `compute_objective` function; if it is not defined, the default `compute_objective` is called and the sum of evaluation metrics such as F1 is returned as the objective value.

```py
>>> best_trial = trainer.hyperparameter_search(
...     direction="maximize",
...     backend="optuna",
...     hp_space=optuna_hp_space,
...     n_trials=20,
...     compute_objective=compute_objective,
... )
```

## Hyperparameter search for DDP finetuning

Currently, hyperparameter search for DDP is enabled for optuna and sigopt. Only the rank-zero process generates the search trial and passes the arguments to the other ranks.
transformers/docs/source/en/hpo_train.md/0
{ "file_path": "transformers/docs/source/en/hpo_train.md", "repo_id": "transformers", "token_count": 2075 }
6
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Agents & Tools <Tip warning={true}> Transformers Agents is an experimental API which is subject to change at any time. Results returned by the agents can vary as the APIs or underlying models are prone to change. </Tip> To learn more about agents and tools make sure to read the [introductory guide](../transformers_agents). This page contains the API docs for the underlying classes. ## Agents We provide two types of agents, based on the main [`Agent`] class: - [`CodeAgent`] acts in one shot, generating code to solve the task, then executes it at once. - [`ReactAgent`] acts step by step, each step consisting of one thought, then one tool call and execution. It has two classes: - [`ReactJsonAgent`] writes its tool calls in JSON. - [`ReactCodeAgent`] writes its tool calls in Python code. ### Agent [[autodoc]] Agent ### CodeAgent [[autodoc]] CodeAgent ### React agents [[autodoc]] ReactAgent [[autodoc]] ReactJsonAgent [[autodoc]] ReactCodeAgent ### ManagedAgent [[autodoc]] ManagedAgent ## Tools ### load_tool [[autodoc]] load_tool ### tool [[autodoc]] tool ### Tool [[autodoc]] Tool ### Toolbox [[autodoc]] Toolbox ### PipelineTool [[autodoc]] PipelineTool ### launch_gradio_demo [[autodoc]] launch_gradio_demo ### stream_to_gradio [[autodoc]] stream_to_gradio ### ToolCollection [[autodoc]] ToolCollection ## Engines You're free to create and use your own engines to be usable by the Agents framework. These engines have the following specification: 1. Follow the [messages format](../chat_templating.md) for its input (`List[Dict[str, str]]`) and return a string. 2. Stop generating outputs *before* the sequences passed in the argument `stop_sequences` ### TransformersEngine For convenience, we have added a `TransformersEngine` that implements the points above, taking a pre-initialized `Pipeline` as input. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, TransformersEngine >>> model_name = "HuggingFaceTB/SmolLM-135M-Instruct" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) >>> model = AutoModelForCausalLM.from_pretrained(model_name) >>> pipe = pipeline("text-generation", model=model, tokenizer=tokenizer) >>> engine = TransformersEngine(pipe) >>> engine([{"role": "user", "content": "Ok!"}], stop_sequences=["great"]) "What a " ``` [[autodoc]] TransformersEngine ### HfApiEngine The `HfApiEngine` is an engine that wraps an [HF Inference API](https://huggingface.co/docs/api-inference/index) client for the execution of the LLM. ```python >>> from transformers import HfApiEngine >>> messages = [ ... {"role": "user", "content": "Hello, how are you?"}, ... {"role": "assistant", "content": "I'm doing great. How can I help you today?"}, ... {"role": "user", "content": "No need to help, take it easy."}, ... 
] >>> HfApiEngine()(messages, stop_sequences=["conversation"]) "That's very kind of you to say! It's always nice to have a relaxed " ``` [[autodoc]] HfApiEngine ## Agent Types Agents can pass any type of object between tools; tools are fully multimodal and can accept and return text, images, audio, and video, among other types. To increase compatibility between tools, as well as to correctly render these returns in ipython (jupyter, colab, ipython notebooks, ...), we implement wrapper classes around these types. The wrapped objects continue to behave like the type they wrap; a text object should still behave as a string, an image object should still behave as a `PIL.Image`. These types have three specific purposes: - Calling `to_raw` on the type should return the underlying object - Calling `to_string` on the type should return the object as a string: that can be the string in case of an `AgentText` but will be the path of the serialized version of the object in other instances - Displaying it in an ipython kernel should display the object correctly ### AgentText [[autodoc]] transformers.agents.agent_types.AgentText ### AgentImage [[autodoc]] transformers.agents.agent_types.AgentImage ### AgentAudio [[autodoc]] transformers.agents.agent_types.AgentAudio
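As an illustration of the wrapper behavior documented above, here is a minimal sketch using `AgentText`. The constructor call (wrapping a plain string) is an assumption; refer to the API references above for the exact signatures.

```python
from transformers.agents.agent_types import AgentText

# Wrap a plain string; the wrapper is expected to keep behaving like a string.
text = AgentText("The capital of France is Paris.")

print(text.to_raw())     # the underlying object (here, the plain string)
print(text.to_string())  # the string representation (same string for AgentText)
```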
transformers/docs/source/en/main_classes/agent.md/0
{ "file_path": "transformers/docs/source/en/main_classes/agent.md", "repo_id": "transformers", "token_count": 1406 }
7
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Processors Processors can mean two different things in the Transformers library: - the objects that pre-process inputs for multi-modal models such as [Wav2Vec2](../model_doc/wav2vec2) (speech and text) or [CLIP](../model_doc/clip) (text and vision) - deprecated objects that were used in older versions of the library to preprocess data for GLUE or SQUAD. ## Multi-modal processors Any multi-modal model will require an object to encode or decode the data that groups several modalities (among text, vision and audio). This is handled by objects called processors, which group together two or more processing objects such as tokenizers (for the text modality), image processors (for vision) and feature extractors (for audio). Those processors inherit from the following base class that implements the saving and loading functionality: [[autodoc]] ProcessorMixin ## Deprecated processors All processors follow the same architecture which is that of the [`~data.processors.utils.DataProcessor`]. The processor returns a list of [`~data.processors.utils.InputExample`]. These [`~data.processors.utils.InputExample`] can be converted to [`~data.processors.utils.InputFeatures`] in order to be fed to the model. [[autodoc]] data.processors.utils.DataProcessor [[autodoc]] data.processors.utils.InputExample [[autodoc]] data.processors.utils.InputFeatures ## GLUE [General Language Understanding Evaluation (GLUE)](https://gluebenchmark.com/) is a benchmark that evaluates the performance of models across a diverse set of existing NLU tasks. It was released together with the paper [GLUE: A multi-task benchmark and analysis platform for natural language understanding](https://openreview.net/pdf?id=rJ4km2R5t7) This library hosts a total of 10 processors for the following tasks: MRPC, MNLI, MNLI (mismatched), CoLA, SST2, STSB, QQP, QNLI, RTE and WNLI. Those processors are: - [`~data.processors.utils.MrpcProcessor`] - [`~data.processors.utils.MnliProcessor`] - [`~data.processors.utils.MnliMismatchedProcessor`] - [`~data.processors.utils.Sst2Processor`] - [`~data.processors.utils.StsbProcessor`] - [`~data.processors.utils.QqpProcessor`] - [`~data.processors.utils.QnliProcessor`] - [`~data.processors.utils.RteProcessor`] - [`~data.processors.utils.WnliProcessor`] Additionally, the following method can be used to load values from a data file and convert them to a list of [`~data.processors.utils.InputExample`]. [[autodoc]] data.processors.glue.glue_convert_examples_to_features ## XNLI [The Cross-Lingual NLI Corpus (XNLI)](https://www.nyu.edu/projects/bowman/xnli/) is a benchmark that evaluates the quality of cross-lingual text representations. 
XNLI is crowd-sourced dataset based on [*MultiNLI*](http://www.nyu.edu/projects/bowman/multinli/): pairs of text are labeled with textual entailment annotations for 15 different languages (including both high-resource language such as English and low-resource languages such as Swahili). It was released together with the paper [XNLI: Evaluating Cross-lingual Sentence Representations](https://arxiv.org/abs/1809.05053) This library hosts the processor to load the XNLI data: - [`~data.processors.utils.XnliProcessor`] Please note that since the gold labels are available on the test set, evaluation is performed on the test set. An example using these processors is given in the [run_xnli.py](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification/run_xnli.py) script. ## SQuAD [The Stanford Question Answering Dataset (SQuAD)](https://rajpurkar.github.io/SQuAD-explorer//) is a benchmark that evaluates the performance of models on question answering. Two versions are available, v1.1 and v2.0. The first version (v1.1) was released together with the paper [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://arxiv.org/abs/1606.05250). The second version (v2.0) was released alongside the paper [Know What You Don't Know: Unanswerable Questions for SQuAD](https://arxiv.org/abs/1806.03822). This library hosts a processor for each of the two versions: ### Processors Those processors are: - [`~data.processors.utils.SquadV1Processor`] - [`~data.processors.utils.SquadV2Processor`] They both inherit from the abstract class [`~data.processors.utils.SquadProcessor`] [[autodoc]] data.processors.squad.SquadProcessor - all Additionally, the following method can be used to convert SQuAD examples into [`~data.processors.utils.SquadFeatures`] that can be used as model inputs. [[autodoc]] data.processors.squad.squad_convert_examples_to_features These processors as well as the aforementioned method can be used with files containing the data as well as with the *tensorflow_datasets* package. Examples are given below. ### Example usage Here is an example using the processors as well as the conversion method using data files: ```python # Loading a V2 processor processor = SquadV2Processor() examples = processor.get_dev_examples(squad_v2_data_dir) # Loading a V1 processor processor = SquadV1Processor() examples = processor.get_dev_examples(squad_v1_data_dir) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` Using *tensorflow_datasets* is as easy as using a data file: ```python # tensorflow_datasets only handle Squad V1. tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) features = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=max_seq_length, doc_stride=args.doc_stride, max_query_length=max_query_length, is_training=not evaluate, ) ``` Another example using these processors is given in the [run_squad.py](https://github.com/huggingface/transformers/tree/main/examples/legacy/question-answering/run_squad.py) script.
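The GLUE processors listed earlier follow the same pattern as the SQuAD examples above. Below is a minimal sketch using the MRPC processor together with `glue_convert_examples_to_features`; the data directory path and the `google-bert/bert-base-uncased` tokenizer checkpoint are placeholders chosen for illustration.

```python
from transformers import AutoTokenizer
from transformers.data.processors.glue import MrpcProcessor, glue_convert_examples_to_features

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

# Hypothetical directory containing the MRPC data files (train.tsv, dev.tsv)
mrpc_data_dir = "path/to/MRPC"

processor = MrpcProcessor()
examples = processor.get_dev_examples(mrpc_data_dir)

features = glue_convert_examples_to_features(
    examples=examples,
    tokenizer=tokenizer,
    max_length=128,
    task="mrpc",
)
```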
transformers/docs/source/en/main_classes/processors.md/0
{ "file_path": "transformers/docs/source/en/main_classes/processors.md", "repo_id": "transformers", "token_count": 2073 }
8
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BARTpho ## Overview The BARTpho model was proposed in [BARTpho: Pre-trained Sequence-to-Sequence Models for Vietnamese](https://arxiv.org/abs/2109.09701) by Nguyen Luong Tran, Duong Minh Le and Dat Quoc Nguyen. The abstract from the paper is the following: *We present BARTpho with two versions -- BARTpho_word and BARTpho_syllable -- the first public large-scale monolingual sequence-to-sequence models pre-trained for Vietnamese. Our BARTpho uses the "large" architecture and pre-training scheme of the sequence-to-sequence denoising model BART, thus especially suitable for generative NLP tasks. Experiments on a downstream task of Vietnamese text summarization show that in both automatic and human evaluations, our BARTpho outperforms the strong baseline mBART and improves the state-of-the-art. We release BARTpho to facilitate future research and applications of generative Vietnamese NLP tasks.* This model was contributed by [dqnguyen](https://huggingface.co/dqnguyen). The original code can be found [here](https://github.com/VinAIResearch/BARTpho). ## Usage example ```python >>> import torch >>> from transformers import AutoModel, AutoTokenizer >>> bartpho = AutoModel.from_pretrained("vinai/bartpho-syllable") >>> tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable") >>> line = "Chúng tôi là những nghiên cứu viên." >>> input_ids = tokenizer(line, return_tensors="pt") >>> with torch.no_grad(): ... features = bartpho(**input_ids) # Models outputs are now tuples >>> # With TensorFlow 2.0+: >>> from transformers import TFAutoModel >>> bartpho = TFAutoModel.from_pretrained("vinai/bartpho-syllable") >>> input_ids = tokenizer(line, return_tensors="tf") >>> features = bartpho(**input_ids) ``` ## Usage tips - Following mBART, BARTpho uses the "large" architecture of BART with an additional layer-normalization layer on top of both the encoder and decoder. Thus, usage examples in the [documentation of BART](bart), when adapting to use with BARTpho, should be adjusted by replacing the BART-specialized classes with the mBART-specialized counterparts. For example: ```python >>> from transformers import MBartForConditionalGeneration >>> bartpho = MBartForConditionalGeneration.from_pretrained("vinai/bartpho-syllable") >>> TXT = "Chúng tôi là <mask> nghiên cứu viên." 
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"] >>> logits = bartpho(input_ids).logits >>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item() >>> probs = logits[0, masked_index].softmax(dim=0) >>> values, predictions = probs.topk(5) >>> print(tokenizer.decode(predictions).split()) ``` - This implementation only provides tokenization: "monolingual_vocab_file" consists of Vietnamese-specialized types extracted from the pre-trained SentencePiece model "vocab_file" of the multilingual XLM-RoBERTa. Other languages that employ this pre-trained multilingual SentencePiece model "vocab_file" for subword segmentation can reuse BartphoTokenizer with their own language-specialized "monolingual_vocab_file". ## BartphoTokenizer [[autodoc]] BartphoTokenizer
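As a minimal sketch of the reuse described in the tip above, one could pair the multilingual SentencePiece model with a language-specialized monolingual vocabulary file. The file names below are hypothetical placeholders.

```python
from transformers import BartphoTokenizer

# Hypothetical paths: the multilingual XLM-RoBERTa SentencePiece model and a
# language-specialized monolingual vocabulary file.
tokenizer = BartphoTokenizer(
    vocab_file="sentencepiece.bpe.model",
    monolingual_vocab_file="dict.my_language.txt",
)

print(tokenizer.tokenize("A sentence in the target language."))
```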
transformers/docs/source/en/model_doc/bartpho.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bartpho.md", "repo_id": "transformers", "token_count": 1166 }
9
<!--Copyright 2023 The Intel Labs Team Authors, The Microsoft Research Team Authors and HuggingFace Inc. team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BridgeTower ## Overview The BridgeTower model was proposed in [BridgeTower: Building Bridges Between Encoders in Vision-Language Representative Learning](https://arxiv.org/abs/2206.08657) by Xiao Xu, Chenfei Wu, Shachar Rosenman, Vasudev Lal, Wanxiang Che, Nan Duan. The goal of this model is to build a bridge between each uni-modal encoder and the cross-modal encoder to enable comprehensive and detailed interaction at each layer of the cross-modal encoder thus achieving remarkable performance on various downstream tasks with almost negligible additional performance and computational costs. This paper has been accepted to the [AAAI'23](https://aaai.org/Conferences/AAAI-23/) conference. The abstract from the paper is the following: *Vision-Language (VL) models with the TWO-TOWER architecture have dominated visual-language representation learning in recent years. Current VL models either use lightweight uni-modal encoders and learn to extract, align and fuse both modalities simultaneously in a deep cross-modal encoder, or feed the last-layer uni-modal representations from the deep pre-trained uni-modal encoders into the top cross-modal encoder. Both approaches potentially restrict vision-language representation learning and limit model performance. In this paper, we propose BRIDGETOWER, which introduces multiple bridge layers that build a connection between the top layers of uni-modal encoders and each layer of the crossmodal encoder. This enables effective bottom-up cross-modal alignment and fusion between visual and textual representations of different semantic levels of pre-trained uni-modal encoders in the cross-modal encoder. Pre-trained with only 4M images, BRIDGETOWER achieves state-of-the-art performance on various downstream vision-language tasks. In particular, on the VQAv2 test-std set, BRIDGETOWER achieves an accuracy of 78.73%, outperforming the previous state-of-the-art model METER by 1.09% with the same pre-training data and almost negligible additional parameters and computational costs. Notably, when further scaling the model, BRIDGETOWER achieves an accuracy of 81.15%, surpassing models that are pre-trained on orders-of-magnitude larger datasets.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/bridgetower_architecture%20.jpg" alt="drawing" width="600"/> <small> BridgeTower architecture. Taken from the <a href="https://arxiv.org/abs/2206.08657">original paper.</a> </small> This model was contributed by [Anahita Bhiwandiwalla](https://huggingface.co/anahita-b), [Tiep Le](https://huggingface.co/Tile) and [Shaoyen Tseng](https://huggingface.co/shaoyent). 
The original code can be found [here](https://github.com/microsoft/BridgeTower). ## Usage tips and examples BridgeTower consists of a visual encoder, a textual encoder and cross-modal encoder with multiple lightweight bridge layers. The goal of this approach was to build a bridge between each uni-modal encoder and the cross-modal encoder to enable comprehensive and detailed interaction at each layer of the cross-modal encoder. In principle, one can apply any visual, textual or cross-modal encoder in the proposed architecture. The [`BridgeTowerProcessor`] wraps [`RobertaTokenizer`] and [`BridgeTowerImageProcessor`] into a single instance to both encode the text and prepare the images respectively. The following example shows how to run contrastive learning using [`BridgeTowerProcessor`] and [`BridgeTowerForContrastiveLearning`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForContrastiveLearning >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> model = BridgeTowerForContrastiveLearning.from_pretrained("BridgeTower/bridgetower-large-itm-mlm-itc") >>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs ``` The following example shows how to run image-text retrieval using [`BridgeTowerProcessor`] and [`BridgeTowerForImageAndTextRetrieval`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForImageAndTextRetrieval >>> import requests >>> from PIL import Image >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> texts = ["An image of two cats chilling on a couch", "A football player scoring a goal"] >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForImageAndTextRetrieval.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # forward pass >>> scores = dict() >>> for text in texts: ... # prepare inputs ... encoding = processor(image, text, return_tensors="pt") ... outputs = model(**encoding) ... scores[text] = outputs.logits[0, 1].item() ``` The following example shows how to run masked language modeling using [`BridgeTowerProcessor`] and [`BridgeTowerForMaskedLM`]. ```python >>> from transformers import BridgeTowerProcessor, BridgeTowerForMaskedLM >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000360943.jpg" >>> image = Image.open(requests.get(url, stream=True).raw).convert("RGB") >>> text = "a <mask> looking out of the window" >>> processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> model = BridgeTowerForMaskedLM.from_pretrained("BridgeTower/bridgetower-base-itm-mlm") >>> # prepare inputs >>> encoding = processor(image, text, return_tensors="pt") >>> # forward pass >>> outputs = model(**encoding) >>> results = processor.decode(outputs.logits.argmax(dim=-1).squeeze(0).tolist()) >>> print(results) .a cat looking out of the window. 
``` Tips: - This implementation of BridgeTower uses [`RobertaTokenizer`] to generate text embeddings and OpenAI's CLIP/ViT model to compute visual embeddings. - Checkpoints for pre-trained [BridgeTower-base](https://huggingface.co/BridgeTower/bridgetower-base) and [BridgeTower masked language modeling and image text matching](https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm) are released. - Please refer to [Table 5](https://arxiv.org/pdf/2206.08657.pdf) for BridgeTower's performance on image retrieval and other downstream tasks. - The PyTorch version of this model is only available in torch 1.10 and higher. ## BridgeTowerConfig [[autodoc]] BridgeTowerConfig ## BridgeTowerTextConfig [[autodoc]] BridgeTowerTextConfig ## BridgeTowerVisionConfig [[autodoc]] BridgeTowerVisionConfig ## BridgeTowerImageProcessor [[autodoc]] BridgeTowerImageProcessor - preprocess ## BridgeTowerProcessor [[autodoc]] BridgeTowerProcessor - __call__ ## BridgeTowerModel [[autodoc]] BridgeTowerModel - forward ## BridgeTowerForContrastiveLearning [[autodoc]] BridgeTowerForContrastiveLearning - forward ## BridgeTowerForMaskedLM [[autodoc]] BridgeTowerForMaskedLM - forward ## BridgeTowerForImageAndTextRetrieval [[autodoc]] BridgeTowerForImageAndTextRetrieval - forward
transformers/docs/source/en/model_doc/bridgetower.md/0
{ "file_path": "transformers/docs/source/en/model_doc/bridgetower.md", "repo_id": "transformers", "token_count": 2392 }
10
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # EfficientNet ## Overview The EfficientNet model was proposed in [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) by Mingxing Tan and Quoc V. Le. EfficientNets are a family of image classification models, which achieve state-of-the-art accuracy, yet being an order-of-magnitude smaller and faster than previous models. The abstract from the paper is the following: *Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.* This model was contributed by [adirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet). ## EfficientNetConfig [[autodoc]] EfficientNetConfig ## EfficientNetImageProcessor [[autodoc]] EfficientNetImageProcessor - preprocess ## EfficientNetModel [[autodoc]] EfficientNetModel - forward ## EfficientNetForImageClassification [[autodoc]] EfficientNetForImageClassification - forward
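The following is a minimal sketch of image classification inference with [`EfficientNetForImageClassification`], assuming a checkpoint such as `google/efficientnet-b0` is available on the Hub.

```python
import requests
import torch
from PIL import Image
from transformers import AutoImageProcessor, EfficientNetForImageClassification

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/efficientnet-b0")
model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b0")

inputs = image_processor(image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Predicted ImageNet class label
predicted_label = logits.argmax(-1).item()
print(model.config.id2label[predicted_label])
```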
transformers/docs/source/en/model_doc/efficientnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/efficientnet.md", "repo_id": "transformers", "token_count": 725 }
11
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # FNet ## Overview The FNet model was proposed in [FNet: Mixing Tokens with Fourier Transforms](https://arxiv.org/abs/2105.03824) by James Lee-Thorp, Joshua Ainslie, Ilya Eckstein, Santiago Ontanon. The model replaces the self-attention layer in a BERT model with a fourier transform which returns only the real parts of the transform. The model is significantly faster than the BERT model because it has fewer parameters and is more memory efficient. The model achieves about 92-97% accuracy of BERT counterparts on GLUE benchmark, and trains much faster than the BERT model. The abstract from the paper is the following: *We show that Transformer encoder architectures can be sped up, with limited accuracy costs, by replacing the self-attention sublayers with simple linear transformations that "mix" input tokens. These linear mixers, along with standard nonlinearities in feed-forward layers, prove competent at modeling semantic relationships in several text classification tasks. Most surprisingly, we find that replacing the self-attention sublayer in a Transformer encoder with a standard, unparameterized Fourier Transform achieves 92-97% of the accuracy of BERT counterparts on the GLUE benchmark, but trains 80% faster on GPUs and 70% faster on TPUs at standard 512 input lengths. At longer input lengths, our FNet model is significantly faster: when compared to the "efficient" Transformers on the Long Range Arena benchmark, FNet matches the accuracy of the most accurate models, while outpacing the fastest models across all sequence lengths on GPUs (and across relatively shorter lengths on TPUs). Finally, FNet has a light memory footprint and is particularly efficient at smaller model sizes; for a fixed speed and accuracy budget, small FNet models outperform Transformer counterparts.* This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/google-research/google-research/tree/master/f_net). ## Usage tips The model was trained without an attention mask as it is based on Fourier Transform. The model was trained with maximum sequence length 512 which includes pad tokens. Hence, it is highly recommended to use the same maximum sequence length for fine-tuning and inference. 
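As a minimal sketch of masked language modeling inference, assuming the `google/fnet-base` checkpoint on the Hub:

```python
import torch
from transformers import FNetForMaskedLM, FNetTokenizer

tokenizer = FNetTokenizer.from_pretrained("google/fnet-base")
model = FNetForMaskedLM.from_pretrained("google/fnet-base")

# No attention mask is used: FNet mixes tokens with a Fourier transform instead of self-attention.
inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

masked_index = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero().item()
predicted_token_id = logits[0, masked_index].argmax(-1).item()
print(tokenizer.decode([predicted_token_id]))
```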
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## FNetConfig [[autodoc]] FNetConfig ## FNetTokenizer [[autodoc]] FNetTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## FNetTokenizerFast [[autodoc]] FNetTokenizerFast ## FNetModel [[autodoc]] FNetModel - forward ## FNetForPreTraining [[autodoc]] FNetForPreTraining - forward ## FNetForMaskedLM [[autodoc]] FNetForMaskedLM - forward ## FNetForNextSentencePrediction [[autodoc]] FNetForNextSentencePrediction - forward ## FNetForSequenceClassification [[autodoc]] FNetForSequenceClassification - forward ## FNetForMultipleChoice [[autodoc]] FNetForMultipleChoice - forward ## FNetForTokenClassification [[autodoc]] FNetForTokenClassification - forward ## FNetForQuestionAnswering [[autodoc]] FNetForQuestionAnswering - forward
transformers/docs/source/en/model_doc/fnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/fnet.md", "repo_id": "transformers", "token_count": 1150 }
12
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # GPT-J ## Overview The GPT-J model was released in the [kingoflolz/mesh-transformer-jax](https://github.com/kingoflolz/mesh-transformer-jax) repository by Ben Wang and Aran Komatsuzaki. It is a GPT-2-like causal language model trained on [the Pile](https://pile.eleuther.ai/) dataset. This model was contributed by [Stella Biderman](https://huggingface.co/stellaathena). ## Usage tips - To load [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B) in float32 one would need at least 2x model size RAM: 1x for initial weights and another 1x to load the checkpoint. So for GPT-J it would take at least 48GB RAM to just load the model. To reduce the RAM usage there are a few options. The `torch_dtype` argument can be used to initialize the model in half-precision on a CUDA device only. There is also a fp16 branch which stores the fp16 weights, which could be used to further minimize the RAM usage: ```python >>> from transformers import GPTJForCausalLM >>> import torch >>> device = "cuda" >>> model = GPTJForCausalLM.from_pretrained( ... "EleutherAI/gpt-j-6B", ... revision="float16", ... torch_dtype=torch.float16, ... ).to(device) ``` - The model should fit on 16GB GPU for inference. For training/fine-tuning it would take much more GPU RAM. Adam optimizer for example makes four copies of the model: model, gradients, average and squared average of the gradients. So it would need at least 4x model size GPU memory, even with mixed precision as gradient updates are in fp32. This is not including the activations and data batches, which would again require some more GPU RAM. So one should explore solutions such as DeepSpeed, to train/fine-tune the model. Another option is to use the original codebase to train/fine-tune the model on TPU and then convert the model to Transformers format for inference. Instructions for that could be found [here](https://github.com/kingoflolz/mesh-transformer-jax/blob/master/howto_finetune.md) - Although the embedding matrix has a size of 50400, only 50257 entries are used by the GPT-2 tokenizer. These extra tokens are added for the sake of efficiency on TPUs. To avoid the mismatch between embedding matrix size and vocab size, the tokenizer for [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B) contains 143 extra tokens `<|extratoken_1|>... <|extratoken_143|>`, so the `vocab_size` of tokenizer also becomes 50400. ## Usage examples The [`~generation.GenerationMixin.generate`] method can be used to generate text using GPT-J model. ```python >>> from transformers import AutoModelForCausalLM, AutoTokenizer >>> model = AutoModelForCausalLM.from_pretrained("EleutherAI/gpt-j-6B") >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") >>> prompt = ( ... 
"In a shocking finding, scientists discovered a herd of unicorns living in a remote, " ... "previously unexplored valley, in the Andes Mountains. Even more surprising to the " ... "researchers was the fact that the unicorns spoke perfect English." ... ) >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids >>> gen_tokens = model.generate( ... input_ids, ... do_sample=True, ... temperature=0.9, ... max_length=100, ... ) >>> gen_text = tokenizer.batch_decode(gen_tokens)[0] ``` ...or in float16 precision: ```python >>> from transformers import GPTJForCausalLM, AutoTokenizer >>> import torch >>> device = "cuda" >>> model = GPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B", torch_dtype=torch.float16).to(device) >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B") >>> prompt = ( ... "In a shocking finding, scientists discovered a herd of unicorns living in a remote, " ... "previously unexplored valley, in the Andes Mountains. Even more surprising to the " ... "researchers was the fact that the unicorns spoke perfect English." ... ) >>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device) >>> gen_tokens = model.generate( ... input_ids, ... do_sample=True, ... temperature=0.9, ... max_length=100, ... ) >>> gen_text = tokenizer.batch_decode(gen_tokens)[0] ``` ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with GPT-J. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-generation"/> - Description of [GPT-J](https://huggingface.co/EleutherAI/gpt-j-6B). - A blog on how to [Deploy GPT-J 6B for inference using Hugging Face Transformers and Amazon SageMaker](https://huggingface.co/blog/gptj-sagemaker). - A blog on how to [Accelerate GPT-J inference with DeepSpeed-Inference on GPUs](https://www.philschmid.de/gptj-deepspeed-inference). - A blog post introducing [GPT-J-6B: 6B JAX-Based Transformer](https://arankomatsuzaki.wordpress.com/2021/06/04/gpt-j/). 🌎 - A notebook for [GPT-J-6B Inference Demo](https://colab.research.google.com/github/kingoflolz/mesh-transformer-jax/blob/master/colab_demo.ipynb). 🌎 - Another notebook demonstrating [Inference with GPT-J-6B](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/GPT-J-6B/Inference_with_GPT_J_6B.ipynb). - [Causal language modeling](https://huggingface.co/course/en/chapter7/6?fw=pt#training-a-causal-language-model-from-scratch) chapter of the 🤗 Hugging Face Course. - [`GPTJForCausalLM`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#gpt-2gpt-and-causal-language-modeling), [text generation example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation), and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb). - [`TFGPTJForCausalLM`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_clmpy) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb). 
- [`FlaxGPTJForCausalLM`] is supported by this [causal language modeling example script](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#causal-language-modeling) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/causal_language_modeling_flax.ipynb). **Documentation resources** - [Text classification task guide](../tasks/sequence_classification) - [Question answering task guide](../tasks/question_answering) - [Causal language modeling task guide](../tasks/language_modeling) ## GPTJConfig [[autodoc]] GPTJConfig - all <frameworkcontent> <pt> ## GPTJModel [[autodoc]] GPTJModel - forward ## GPTJForCausalLM [[autodoc]] GPTJForCausalLM - forward ## GPTJForSequenceClassification [[autodoc]] GPTJForSequenceClassification - forward ## GPTJForQuestionAnswering [[autodoc]] GPTJForQuestionAnswering - forward </pt> <tf> ## TFGPTJModel [[autodoc]] TFGPTJModel - call ## TFGPTJForCausalLM [[autodoc]] TFGPTJForCausalLM - call ## TFGPTJForSequenceClassification [[autodoc]] TFGPTJForSequenceClassification - call ## TFGPTJForQuestionAnswering [[autodoc]] TFGPTJForQuestionAnswering - call </tf> <jax> ## FlaxGPTJModel [[autodoc]] FlaxGPTJModel - __call__ ## FlaxGPTJForCausalLM [[autodoc]] FlaxGPTJForCausalLM - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/gptj.md/0
{ "file_path": "transformers/docs/source/en/model_doc/gptj.md", "repo_id": "transformers", "token_count": 2807 }
13
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # LLaMA ## Overview The LLaMA model was proposed in [LLaMA: Open and Efficient Foundation Language Models](https://arxiv.org/abs/2302.13971) by Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, Aurelien Rodriguez, Armand Joulin, Edouard Grave, Guillaume Lample. It is a collection of foundation language models ranging from 7B to 65B parameters. The abstract from the paper is the following: *We introduce LLaMA, a collection of foundation language models ranging from 7B to 65B parameters. We train our models on trillions of tokens, and show that it is possible to train state-of-the-art models using publicly available datasets exclusively, without resorting to proprietary and inaccessible datasets. In particular, LLaMA-13B outperforms GPT-3 (175B) on most benchmarks, and LLaMA-65B is competitive with the best models, Chinchilla-70B and PaLM-540B. We release all our models to the research community. * This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). ## Usage tips - Weights for the LLaMA models can be obtained from by filling out [this form](https://docs.google.com/forms/d/e/1FAIpQLSfqNECQnMkycAp2jP4Z9TFX0cGR4uf7b_fBxjY_OjhJILlKGA/viewform?usp=send_form) - After downloading the weights, they will need to be converted to the Hugging Face Transformers format using the [conversion script](https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/convert_llama_weights_to_hf.py). The script can be called with the following (example) command: ```bash python src/transformers/models/llama/convert_llama_weights_to_hf.py \ --input_dir /path/to/downloaded/llama/weights --model_size 7B --output_dir /output/path ``` - After conversion, the model and tokenizer can be loaded via: ```python from transformers import LlamaForCausalLM, LlamaTokenizer tokenizer = LlamaTokenizer.from_pretrained("/output/path") model = LlamaForCausalLM.from_pretrained("/output/path") ``` Note that executing the script requires enough CPU RAM to host the whole model in float16 precision (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). For the 65B model, it's thus 130GB of RAM needed. - The LLaMA tokenizer is a BPE model based on [sentencepiece](https://github.com/google/sentencepiece). 
One quirk of sentencepiece is that when decoding a sequence, if the first token is the start of the word (e.g. "Banana"), the tokenizer does not prepend the prefix space to the string. This model was contributed by [zphang](https://huggingface.co/zphang) with contributions from [BlackSamorez](https://huggingface.co/BlackSamorez). The code of the implementation in Hugging Face is based on GPT-NeoX [here](https://github.com/EleutherAI/gpt-neox). The original code of the authors can be found [here](https://github.com/facebookresearch/llama). The Flax version of the implementation was contributed by [afmck](https://huggingface.co/afmck) with the code in the implementation based on Hugging Face's Flax GPT-Neo. Based on the original LLaMA model, Meta AI has released some follow-up works: - **Llama2**: Llama2 is an improved version of Llama with some architectural tweaks (Grouped Query Attention), and is pre-trained on 2Trillion tokens. Refer to the documentation of Llama2 which can be found [here](llama2). ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with LLaMA. If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. <PipelineTag pipeline="text-classification"/> - A [notebook](https://colab.research.google.com/github/bigscience-workshop/petals/blob/main/examples/prompt-tuning-sst2.ipynb#scrollTo=f04ba4d2) on how to use prompt tuning to adapt the LLaMA model for text classification task. 🌎 <PipelineTag pipeline="question-answering"/> - [StackLLaMA: A hands-on guide to train LLaMA with RLHF](https://huggingface.co/blog/stackllama#stackllama-a-hands-on-guide-to-train-llama-with-rlhf), a blog post about how to train LLaMA to answer questions on [Stack Exchange](https://stackexchange.com/) with RLHF. ⚗️ Optimization - A [notebook](https://colab.research.google.com/drive/1SQUXq1AMZPSLD4mk3A3swUIc6Y2dclme?usp=sharing) on how to fine-tune LLaMA model using xturing library on GPU which has limited memory. 🌎 ⚡️ Inference - A [notebook](https://colab.research.google.com/github/DominguesM/alpaca-lora-ptbr-7b/blob/main/notebooks/02%20-%20Evaluate.ipynb) on how to run the LLaMA Model using PeftModel from the 🤗 PEFT library. 🌎 - A [notebook](https://colab.research.google.com/drive/1l2GiSSPbajVyp2Nk3CFT4t3uH6-5TiBe?usp=sharing) on how to load a PEFT adapter LLaMA model with LangChain. 🌎 🚀 Deploy - A [notebook](https://colab.research.google.com/github/lxe/simple-llama-finetuner/blob/master/Simple_LLaMA_FineTuner.ipynb#scrollTo=3PM_DilAZD8T) on how to fine-tune LLaMA model using LoRA method via the 🤗 PEFT library with intuitive UI. 🌎 - A [notebook](https://github.com/aws/amazon-sagemaker-examples/blob/main/introduction_to_amazon_algorithms/jumpstart-foundation-models/text-generation-open-llama.ipynb) on how to deploy Open-LLaMA model for text generation on Amazon SageMaker. 
🌎 ## LlamaConfig [[autodoc]] LlamaConfig ## LlamaTokenizer [[autodoc]] LlamaTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## LlamaTokenizerFast [[autodoc]] LlamaTokenizerFast - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - update_post_processor - save_vocabulary ## LlamaModel [[autodoc]] LlamaModel - forward ## LlamaForCausalLM [[autodoc]] LlamaForCausalLM - forward ## LlamaForSequenceClassification [[autodoc]] LlamaForSequenceClassification - forward ## LlamaForQuestionAnswering [[autodoc]] LlamaForQuestionAnswering - forward ## LlamaForTokenClassification [[autodoc]] LlamaForTokenClassification - forward ## FlaxLlamaModel [[autodoc]] FlaxLlamaModel - __call__ ## FlaxLlamaForCausalLM [[autodoc]] FlaxLlamaForCausalLM - __call__
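After conversion, text generation follows the standard causal language modeling pattern. A minimal sketch, reusing the converted checkpoint directory (`/output/path`) from the conversion example above:

```python
import torch
from transformers import LlamaForCausalLM, LlamaTokenizer

tokenizer = LlamaTokenizer.from_pretrained("/output/path")
# device_map="auto" requires the accelerate package; drop it to load on CPU.
model = LlamaForCausalLM.from_pretrained("/output/path", torch_dtype=torch.float16, device_map="auto")

inputs = tokenizer("Plants create energy through a process known as", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=30)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```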
transformers/docs/source/en/model_doc/llama.md/0
{ "file_path": "transformers/docs/source/en/model_doc/llama.md", "repo_id": "transformers", "token_count": 2384 }
14
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MarkupLM ## Overview The MarkupLM model was proposed in [MarkupLM: Pre-training of Text and Markup Language for Visually-rich Document Understanding](https://arxiv.org/abs/2110.08518) by Junlong Li, Yiheng Xu, Lei Cui, Furu Wei. MarkupLM is BERT, but applied to HTML pages instead of raw text documents. The model incorporates additional embedding layers to improve performance, similar to [LayoutLM](layoutlm). The model can be used for tasks like question answering on web pages or information extraction from web pages. It obtains state-of-the-art results on 2 important benchmarks: - [WebSRC](https://x-lance.github.io/WebSRC/), a dataset for Web-Based Structural Reading Comprehension (a bit like SQuAD but for web pages) - [SWDE](https://www.researchgate.net/publication/221299838_From_one_tree_to_a_forest_a_unified_solution_for_structured_web_data_extraction), a dataset for information extraction from web pages (basically named-entity recognition on web pages) The abstract from the paper is the following: *Multimodal pre-training with text, layout, and image has made significant progress for Visually-rich Document Understanding (VrDU), especially the fixed-layout documents such as scanned document images. While, there are still a large number of digital documents where the layout information is not fixed and needs to be interactively and dynamically rendered for visualization, making existing layout-based pre-training approaches not easy to apply. In this paper, we propose MarkupLM for document understanding tasks with markup languages as the backbone such as HTML/XML-based documents, where text and markup information is jointly pre-trained. Experiment results show that the pre-trained MarkupLM significantly outperforms the existing strong baseline models on several document understanding tasks. The pre-trained model and code will be publicly available.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/microsoft/unilm/tree/master/markuplm). ## Usage tips - In addition to `input_ids`, [`~MarkupLMModel.forward`] expects 2 additional inputs, namely `xpath_tags_seq` and `xpath_subs_seq`. These are the XPATH tags and subscripts respectively for each token in the input sequence. - One can use [`MarkupLMProcessor`] to prepare all data for the model. Refer to the [usage guide](#usage-markuplmprocessor) for more info. <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/markuplm_architecture.jpg" alt="drawing" width="600"/> <small> MarkupLM architecture. 
Taken from the <a href="https://arxiv.org/abs/2110.08518">original paper.</a> </small> ## Usage: MarkupLMProcessor The easiest way to prepare data for the model is to use [`MarkupLMProcessor`], which internally combines a feature extractor ([`MarkupLMFeatureExtractor`]) and a tokenizer ([`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]). The feature extractor is used to extract all nodes and xpaths from the HTML strings, which are then provided to the tokenizer, which turns them into the token-level inputs of the model (`input_ids` etc.). Note that you can still use the feature extractor and tokenizer separately, if you only want to handle one of the two tasks. ```python from transformers import MarkupLMFeatureExtractor, MarkupLMTokenizerFast, MarkupLMProcessor feature_extractor = MarkupLMFeatureExtractor() tokenizer = MarkupLMTokenizerFast.from_pretrained("microsoft/markuplm-base") processor = MarkupLMProcessor(feature_extractor, tokenizer) ``` In short, one can provide HTML strings (and possibly additional data) to [`MarkupLMProcessor`], and it will create the inputs expected by the model. Internally, the processor first uses [`MarkupLMFeatureExtractor`] to get a list of nodes and corresponding xpaths. The nodes and xpaths are then provided to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`], which converts them to token-level `input_ids`, `attention_mask`, `token_type_ids`, `xpath_subs_seq`, `xpath_tags_seq`. Optionally, one can provide node labels to the processor, which are turned into token-level `labels`. [`MarkupLMFeatureExtractor`] uses [Beautiful Soup](https://www.crummy.com/software/BeautifulSoup/bs4/doc/), a Python library for pulling data out of HTML and XML files, under the hood. Note that you can still use your own parsing solution of choice, and provide the nodes and xpaths yourself to [`MarkupLMTokenizer`] or [`MarkupLMTokenizerFast`]. In total, there are 5 use cases that are supported by the processor. Below, we list them all. Note that each of these use cases work for both batched and non-batched inputs (we illustrate them for non-batched inputs). **Use case 1: web page classification (training, inference) + token classification (inference), parse_html = True** This is the simplest case, in which the processor will use the feature extractor to get all nodes and xpaths from the HTML. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> html_string = """ ... <!DOCTYPE html> ... <html> ... <head> ... <title>Hello world</title> ... </head> ... <body> ... <h1>Welcome</h1> ... <p>Here is my website.</p> ... </body> ... </html>""" >>> # note that you can also add provide all tokenizer parameters here such as padding, truncation >>> encoding = processor(html_string, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 2: web page classification (training, inference) + token classification (inference), parse_html=False** In case one already has obtained all nodes and xpaths, one doesn't need the feature extractor. In that case, one should provide the nodes and corresponding xpaths themselves to the processor, and make sure to set `parse_html` to `False`. 
```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> encoding = processor(nodes=nodes, xpaths=xpaths, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 3: token classification (training), parse_html=False** For token classification tasks (such as [SWDE](https://paperswithcode.com/dataset/swde)), one can also provide the corresponding node labels in order to train a model. The processor will then convert these into token-level `labels`. By default, it will only label the first wordpiece of a word, and label the remaining wordpieces with -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. In case you want all wordpieces of a word to be labeled, you can initialize the tokenizer with `only_label_first_subword` set to `False`. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> node_labels = [1, 2, 2, 1] >>> encoding = processor(nodes=nodes, xpaths=xpaths, node_labels=node_labels, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq', 'labels']) ``` **Use case 4: web page question answering (inference), parse_html=True** For question answering tasks on web pages, you can provide a question to the processor. By default, the processor will use the feature extractor to get all nodes and xpaths, and create [CLS] question tokens [SEP] word tokens [SEP]. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> html_string = """ ... <!DOCTYPE html> ... <html> ... <head> ... <title>Hello world</title> ... </head> ... <body> ... <h1>Welcome</h1> ... <p>My name is Niels.</p> ... </body> ... </html>""" >>> question = "What's his name?" >>> encoding = processor(html_string, questions=question, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` **Use case 5: web page question answering (inference), parse_html=False** For question answering tasks (such as WebSRC), you can provide a question to the processor. If you have extracted all nodes and xpaths yourself, you can provide them directly to the processor. Make sure to set `parse_html` to `False`. ```python >>> from transformers import MarkupLMProcessor >>> processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base") >>> processor.parse_html = False >>> nodes = ["hello", "world", "how", "are"] >>> xpaths = ["/html/body/div/li[1]/div/span", "/html/body/div/li[1]/div/span", "html/body", "html/body/div"] >>> question = "What's his name?" 
>>> encoding = processor(nodes=nodes, xpaths=xpaths, questions=question, return_tensors="pt") >>> print(encoding.keys()) dict_keys(['input_ids', 'token_type_ids', 'attention_mask', 'xpath_tags_seq', 'xpath_subs_seq']) ``` ## Resources - [Demo notebooks](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/MarkupLM) - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) ## MarkupLMConfig [[autodoc]] MarkupLMConfig - all ## MarkupLMFeatureExtractor [[autodoc]] MarkupLMFeatureExtractor - __call__ ## MarkupLMTokenizer [[autodoc]] MarkupLMTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary ## MarkupLMTokenizerFast [[autodoc]] MarkupLMTokenizerFast - all ## MarkupLMProcessor [[autodoc]] MarkupLMProcessor - __call__ ## MarkupLMModel [[autodoc]] MarkupLMModel - forward ## MarkupLMForSequenceClassification [[autodoc]] MarkupLMForSequenceClassification - forward ## MarkupLMForTokenClassification [[autodoc]] MarkupLMForTokenClassification - forward ## MarkupLMForQuestionAnswering [[autodoc]] MarkupLMForQuestionAnswering - forward
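To run inference on top of the processor outputs, here is a minimal sketch for web page question answering, assuming the fine-tuned checkpoint `microsoft/markuplm-base-finetuned-websrc` is available on the Hub:

```python
import torch
from transformers import MarkupLMForQuestionAnswering, MarkupLMProcessor

processor = MarkupLMProcessor.from_pretrained("microsoft/markuplm-base-finetuned-websrc")
model = MarkupLMForQuestionAnswering.from_pretrained("microsoft/markuplm-base-finetuned-websrc")

html_string = "<html> <head> <title>My name is Niels</title> </head> </html>"
question = "What's his name?"

encoding = processor(html_string, questions=question, return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)

# Decode the predicted answer span from the start/end logits
answer_start = outputs.start_logits.argmax().item()
answer_end = outputs.end_logits.argmax().item()
answer_tokens = encoding.input_ids[0, answer_start : answer_end + 1]
print(processor.decode(answer_tokens).strip())
```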
transformers/docs/source/en/model_doc/markuplm.md/0
{ "file_path": "transformers/docs/source/en/model_doc/markuplm.md", "repo_id": "transformers", "token_count": 3443 }
15
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # MobileBERT ## Overview The MobileBERT model was proposed in [MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited Devices](https://arxiv.org/abs/2004.02984) by Zhiqing Sun, Hongkun Yu, Xiaodan Song, Renjie Liu, Yiming Yang, and Denny Zhou. It's a bidirectional transformer based on the BERT model, which is compressed and accelerated using several approaches. The abstract from the paper is the following: *Natural Language Processing (NLP) has recently achieved great success by using huge pre-trained models with hundreds of millions of parameters. However, these models suffer from heavy model sizes and high latency such that they cannot be deployed to resource-limited mobile devices. In this paper, we propose MobileBERT for compressing and accelerating the popular BERT model. Like the original BERT, MobileBERT is task-agnostic, that is, it can be generically applied to various downstream NLP tasks via simple fine-tuning. Basically, MobileBERT is a thin version of BERT_LARGE, while equipped with bottleneck structures and a carefully designed balance between self-attentions and feed-forward networks. To train MobileBERT, we first train a specially designed teacher model, an inverted-bottleneck incorporated BERT_LARGE model. Then, we conduct knowledge transfer from this teacher to MobileBERT. Empirical studies show that MobileBERT is 4.3x smaller and 5.5x faster than BERT_BASE while achieving competitive results on well-known benchmarks. On the natural language inference tasks of GLUE, MobileBERT achieves a GLUEscore o 77.7 (0.6 lower than BERT_BASE), and 62 ms latency on a Pixel 4 phone. On the SQuAD v1.1/v2.0 question answering task, MobileBERT achieves a dev F1 score of 90.0/79.2 (1.5/2.1 higher than BERT_BASE).* This model was contributed by [vshampor](https://huggingface.co/vshampor). The original code can be found [here](https://github.com/google-research/google-research/tree/master/mobilebert). ## Usage tips - MobileBERT is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than the left. - MobileBERT is similar to BERT and therefore relies on the masked language modeling (MLM) objective. It is therefore efficient at predicting masked tokens and at NLU in general, but is not optimal for text generation. Models trained with a causal language modeling (CLM) objective are better in that regard. 
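A minimal sketch of masked language modeling inference with the `google/mobilebert-uncased` checkpoint:

```python
import torch
from transformers import AutoTokenizer, MobileBertForMaskedLM

tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

masked_index = (inputs.input_ids[0] == tokenizer.mask_token_id).nonzero().item()
predicted_token_id = logits[0, masked_index].argmax(-1).item()
print(tokenizer.decode([predicted_token_id]))
```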
## Resources - [Text classification task guide](../tasks/sequence_classification) - [Token classification task guide](../tasks/token_classification) - [Question answering task guide](../tasks/question_answering) - [Masked language modeling task guide](../tasks/masked_language_modeling) - [Multiple choice task guide](../tasks/multiple_choice) ## MobileBertConfig [[autodoc]] MobileBertConfig ## MobileBertTokenizer [[autodoc]] MobileBertTokenizer ## MobileBertTokenizerFast [[autodoc]] MobileBertTokenizerFast ## MobileBert specific outputs [[autodoc]] models.mobilebert.modeling_mobilebert.MobileBertForPreTrainingOutput [[autodoc]] models.mobilebert.modeling_tf_mobilebert.TFMobileBertForPreTrainingOutput <frameworkcontent> <pt> ## MobileBertModel [[autodoc]] MobileBertModel - forward ## MobileBertForPreTraining [[autodoc]] MobileBertForPreTraining - forward ## MobileBertForMaskedLM [[autodoc]] MobileBertForMaskedLM - forward ## MobileBertForNextSentencePrediction [[autodoc]] MobileBertForNextSentencePrediction - forward ## MobileBertForSequenceClassification [[autodoc]] MobileBertForSequenceClassification - forward ## MobileBertForMultipleChoice [[autodoc]] MobileBertForMultipleChoice - forward ## MobileBertForTokenClassification [[autodoc]] MobileBertForTokenClassification - forward ## MobileBertForQuestionAnswering [[autodoc]] MobileBertForQuestionAnswering - forward </pt> <tf> ## TFMobileBertModel [[autodoc]] TFMobileBertModel - call ## TFMobileBertForPreTraining [[autodoc]] TFMobileBertForPreTraining - call ## TFMobileBertForMaskedLM [[autodoc]] TFMobileBertForMaskedLM - call ## TFMobileBertForNextSentencePrediction [[autodoc]] TFMobileBertForNextSentencePrediction - call ## TFMobileBertForSequenceClassification [[autodoc]] TFMobileBertForSequenceClassification - call ## TFMobileBertForMultipleChoice [[autodoc]] TFMobileBertForMultipleChoice - call ## TFMobileBertForTokenClassification [[autodoc]] TFMobileBertForTokenClassification - call ## TFMobileBertForQuestionAnswering [[autodoc]] TFMobileBertForQuestionAnswering - call </tf> </frameworkcontent>
transformers/docs/source/en/model_doc/mobilebert.md/0
{ "file_path": "transformers/docs/source/en/model_doc/mobilebert.md", "repo_id": "transformers", "token_count": 1548 }
16
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Neighborhood Attention Transformer <Tip warning={true}> This model is in maintenance mode only, we don't accept any new PRs changing its code. If you run into any issues running this model, please reinstall the last version that supported this model: v4.40.2. You can do so by running the following command: `pip install -U transformers==4.40.2`. </Tip> ## Overview NAT was proposed in [Neighborhood Attention Transformer](https://arxiv.org/abs/2204.07143) by Ali Hassani, Steven Walton, Jiachen Li, Shen Li, and Humphrey Shi. It is a hierarchical vision transformer based on Neighborhood Attention, a sliding-window self attention pattern. The abstract from the paper is the following: *We present Neighborhood Attention (NA), the first efficient and scalable sliding-window attention mechanism for vision. NA is a pixel-wise operation, localizing self attention (SA) to the nearest neighboring pixels, and therefore enjoys a linear time and space complexity compared to the quadratic complexity of SA. The sliding-window pattern allows NA's receptive field to grow without needing extra pixel shifts, and preserves translational equivariance, unlike Swin Transformer's Window Self Attention (WSA). We develop NATTEN (Neighborhood Attention Extension), a Python package with efficient C++ and CUDA kernels, which allows NA to run up to 40% faster than Swin's WSA while using up to 25% less memory. We further present Neighborhood Attention Transformer (NAT), a new hierarchical transformer design based on NA that boosts image classification and downstream vision performance. Experimental results on NAT are competitive; NAT-Tiny reaches 83.2% top-1 accuracy on ImageNet, 51.4% mAP on MS-COCO and 48.4% mIoU on ADE20K, which is 1.9% ImageNet accuracy, 1.0% COCO mAP, and 2.6% ADE20K mIoU improvement over a Swin model with similar size. * <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/neighborhood-attention-pattern.jpg" alt="drawing" width="600"/> <small> Neighborhood Attention compared to other attention patterns. Taken from the <a href="https://arxiv.org/abs/2204.07143">original paper</a>.</small> This model was contributed by [Ali Hassani](https://huggingface.co/alihassanijr). The original code can be found [here](https://github.com/SHI-Labs/Neighborhood-Attention-Transformer). ## Usage tips - One can use the [`AutoImageProcessor`] API to prepare images for the model. - NAT can be used as a *backbone*. When `output_hidden_states = True`, it will output both `hidden_states` and `reshaped_hidden_states`. The `reshaped_hidden_states` have a shape of `(batch, num_channels, height, width)` rather than `(batch_size, height, width, num_channels)`. 
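As a quick illustration of the [`AutoImageProcessor`] workflow mentioned above, the sketch below runs image classification with a NAT checkpoint. The `shi-labs/nat-mini-in1k-224` checkpoint name is an assumption (use any NAT checkpoint you have access to), and running the forward pass requires the NATTEN package described in the notes below.

```python
>>> import requests
>>> from PIL import Image
>>> from transformers import AutoImageProcessor, NatForImageClassification

>>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)

>>> processor = AutoImageProcessor.from_pretrained("shi-labs/nat-mini-in1k-224")
>>> model = NatForImageClassification.from_pretrained("shi-labs/nat-mini-in1k-224")

>>> # Prepare the image and run a single forward pass
>>> inputs = processor(images=image, return_tensors="pt")
>>> logits = model(**inputs).logits
>>> print(model.config.id2label[logits.argmax(-1).item()])
```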
Notes: - NAT depends on [NATTEN](https://github.com/SHI-Labs/NATTEN/)'s implementation of Neighborhood Attention. You can install it with pre-built wheels for Linux by referring to [shi-labs.com/natten](https://shi-labs.com/natten), or build on your system by running `pip install natten`. Note that the latter will likely take time to compile. NATTEN does not support Windows devices yet. - Patch size of 4 is only supported at the moment. ## Resources A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with NAT. <PipelineTag pipeline="image-classification"/> - [`NatForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## NatConfig [[autodoc]] NatConfig ## NatModel [[autodoc]] NatModel - forward ## NatForImageClassification [[autodoc]] NatForImageClassification - forward
transformers/docs/source/en/model_doc/nat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/nat.md", "repo_id": "transformers", "token_count": 1320 }
17
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # OWL-ViT ## Overview The OWL-ViT (short for Vision Transformer for Open-World Localization) was proposed in [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby. OWL-ViT is an open-vocabulary object detection network trained on a variety of (image, text) pairs. It can be used to query an image with one or multiple text queries to search for and detect target objects described in text. The abstract from the paper is the following: *Combining simple architectures with large-scale pre-training has led to massive improvements in image classification. For object detection, pre-training and scaling approaches are less well established, especially in the long-tailed and open-vocabulary setting, where training data is relatively scarce. In this paper, we propose a strong recipe for transferring image-text models to open-vocabulary object detection. We use a standard Vision Transformer architecture with minimal modifications, contrastive image-text pre-training, and end-to-end detection fine-tuning. Our analysis of the scaling properties of this setup shows that increasing image-level pre-training and model size yield consistent improvements on the downstream detection task. We provide the adaptation strategies and regularizations needed to attain very strong performance on zero-shot text-conditioned and one-shot image-conditioned object detection. Code and models are available on GitHub.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/owlvit_architecture.jpg" alt="drawing" width="600"/> <small> OWL-ViT architecture. Taken from the <a href="https://arxiv.org/abs/2205.06230">original paper</a>. </small> This model was contributed by [adirik](https://huggingface.co/adirik). The original code can be found [here](https://github.com/google-research/scenic/tree/main/scenic/projects/owl_vit). ## Usage tips OWL-ViT is a zero-shot text-conditioned object detection model. OWL-ViT uses [CLIP](clip) as its multi-modal backbone, with a ViT-like Transformer to get visual features and a causal language model to get the text features. To use CLIP for detection, OWL-ViT removes the final token pooling layer of the vision model and attaches a lightweight classification and box head to each transformer output token. Open-vocabulary classification is enabled by replacing the fixed classification layer weights with the class-name embeddings obtained from the text model. 
The authors first train CLIP from scratch and fine-tune it end-to-end with the classification and box heads on standard detection datasets using a bipartite matching loss. One or multiple text queries per image can be used to perform zero-shot text-conditioned object detection. [`OwlViTImageProcessor`] can be used to resize (or rescale) and normalize images for the model and [`CLIPTokenizer`] is used to encode the text. [`OwlViTProcessor`] wraps [`OwlViTImageProcessor`] and [`CLIPTokenizer`] into a single instance to both encode the text and prepare the images. The following example shows how to perform object detection using [`OwlViTProcessor`] and [`OwlViTForObjectDetection`]. ```python >>> import requests >>> from PIL import Image >>> import torch >>> from transformers import OwlViTProcessor, OwlViTForObjectDetection >>> processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32") >>> model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> text_labels = [["a photo of a cat", "a photo of a dog"]] >>> inputs = processor(text=text_labels, images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # Target image sizes (height, width) to rescale box predictions [batch_size, 2] >>> target_sizes = torch.tensor([(image.height, image.width)]) >>> # Convert outputs (bounding boxes and class logits) to Pascal VOC format (xmin, ymin, xmax, ymax) >>> results = processor.post_process_grounded_object_detection( ... outputs=outputs, target_sizes=target_sizes, threshold=0.1, text_labels=text_labels ... ) >>> # Retrieve predictions for the first image for the corresponding text queries >>> result = results[0] >>> boxes, scores, text_labels = result["boxes"], result["scores"], result["text_labels"] >>> for box, score, text_label in zip(boxes, scores, text_labels): ... box = [round(i, 2) for i in box.tolist()] ... print(f"Detected {text_label} with confidence {round(score.item(), 3)} at location {box}") Detected a photo of a cat with confidence 0.707 at location [324.97, 20.44, 640.58, 373.29] Detected a photo of a cat with confidence 0.717 at location [1.46, 55.26, 315.55, 472.17] ``` ## Resources A demo notebook on using OWL-ViT for zero- and one-shot (image-guided) object detection can be found [here](https://github.com/huggingface/notebooks/blob/main/examples/zeroshot_object_detection_with_owlvit.ipynb). ## OwlViTConfig [[autodoc]] OwlViTConfig - from_text_vision_configs ## OwlViTTextConfig [[autodoc]] OwlViTTextConfig ## OwlViTVisionConfig [[autodoc]] OwlViTVisionConfig ## OwlViTImageProcessor [[autodoc]] OwlViTImageProcessor - preprocess - post_process_object_detection - post_process_image_guided_detection ## OwlViTProcessor [[autodoc]] OwlViTProcessor - __call__ - post_process_grounded_object_detection - post_process_image_guided_detection ## OwlViTModel [[autodoc]] OwlViTModel - forward - get_text_features - get_image_features ## OwlViTTextModel [[autodoc]] OwlViTTextModel - forward ## OwlViTVisionModel [[autodoc]] OwlViTVisionModel - forward ## OwlViTForObjectDetection [[autodoc]] OwlViTForObjectDetection - forward - image_guided_detection
transformers/docs/source/en/model_doc/owlvit.md/0
{ "file_path": "transformers/docs/source/en/model_doc/owlvit.md", "repo_id": "transformers", "token_count": 1993 }
18
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Pop2Piano <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/spaces/sweetcocoa/pop2piano"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## Overview The Pop2Piano model was proposed in [Pop2Piano : Pop Audio-based Piano Cover Generation](https://arxiv.org/abs/2211.00895) by Jongho Choi and Kyogu Lee. Piano covers of pop music are widely enjoyed, but generating them from music is not a trivial task. It requires great expertise with playing piano as well as knowing different characteristics and melodies of a song. With Pop2Piano you can directly generate a cover from a song's audio waveform. It is the first model to directly generate a piano cover from pop audio without melody and chord extraction modules. Pop2Piano is an encoder-decoder Transformer model based on [T5](https://arxiv.org/pdf/1910.10683.pdf). The input audio is transformed to its waveform and passed to the encoder, which transforms it to a latent representation. The decoder uses these latent representations to generate token ids in an autoregressive way. Each token id corresponds to one of four different token types: time, velocity, note and 'special'. The token ids are then decoded to their equivalent MIDI file. The abstract from the paper is the following: *Piano covers of pop music are enjoyed by many people. However, the task of automatically generating piano covers of pop music is still understudied. This is partly due to the lack of synchronized {Pop, Piano Cover} data pairs, which made it challenging to apply the latest data-intensive deep learning-based methods. To leverage the power of the data-driven approach, we make a large amount of paired and synchronized {Pop, Piano Cover} data using an automated pipeline. In this paper, we present Pop2Piano, a Transformer network that generates piano covers given waveforms of pop music. To the best of our knowledge, this is the first model to generate a piano cover directly from pop audio without using melody and chord extraction modules. We show that Pop2Piano, trained with our dataset, is capable of producing plausible piano covers.* This model was contributed by [Susnato Dhar](https://huggingface.co/susnato). The original code can be found [here](https://github.com/sweetcocoa/pop2piano). ## Usage tips * To use Pop2Piano, you will need to install the 🤗 Transformers library, as well as the following third party modules: ```bash pip install pretty-midi==0.2.9 essentia==2.1b6.dev1034 librosa scipy ``` Please note that you may need to restart your runtime after installation. * Pop2Piano is an Encoder-Decoder based model like T5. * Pop2Piano can be used to generate midi-audio files for a given audio sequence. * Choosing different composers in `Pop2PianoForConditionalGeneration.generate()` can lead to variety of different results. * Setting the sampling rate to 44.1 kHz when loading the audio file can give good performance. 
* Though Pop2Piano was mainly trained on Korean Pop music, it also does pretty well on other Western Pop or Hip Hop songs. ## Examples - Example using HuggingFace Dataset: ```python >>> from datasets import load_dataset >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> ds = load_dataset("sweetcocoa/pop2piano_ci", split="test") >>> inputs = processor( ... audio=ds["audio"][0]["array"], sampling_rate=ds["audio"][0]["sampling_rate"], return_tensors="pt" ... ) >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example using your own audio file: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> audio, sr = librosa.load("<your_audio_file_here>", sr=44100) # feel free to change the sr to a suitable value. >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=audio, sampling_rate=sr, return_tensors="pt") >>> model_output = model.generate(input_features=inputs["input_features"], composer="composer1") >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"][0] >>> tokenizer_output.write("./Outputs/midi_output.mid") ``` - Example of processing multiple audio files in batch: ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoProcessor >>> # feel free to change the sr to a suitable value. >>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> processor = Pop2PianoProcessor.from_pretrained("sweetcocoa/pop2piano") >>> inputs = processor(audio=[audio1, audio2], sampling_rate=[sr1, sr2], return_attention_mask=True, return_tensors="pt") >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = processor.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` - Example of processing multiple audio files in batch (Using `Pop2PianoFeatureExtractor` and `Pop2PianoTokenizer`): ```python >>> import librosa >>> from transformers import Pop2PianoForConditionalGeneration, Pop2PianoFeatureExtractor, Pop2PianoTokenizer >>> # feel free to change the sr to a suitable value. 
>>> audio1, sr1 = librosa.load("<your_first_audio_file_here>", sr=44100) >>> audio2, sr2 = librosa.load("<your_second_audio_file_here>", sr=44100) >>> model = Pop2PianoForConditionalGeneration.from_pretrained("sweetcocoa/pop2piano") >>> feature_extractor = Pop2PianoFeatureExtractor.from_pretrained("sweetcocoa/pop2piano") >>> tokenizer = Pop2PianoTokenizer.from_pretrained("sweetcocoa/pop2piano") >>> inputs = feature_extractor( ... audio=[audio1, audio2], ... sampling_rate=[sr1, sr2], ... return_attention_mask=True, ... return_tensors="pt", ... ) >>> # Since we now generating in batch(2 audios) we must pass the attention_mask >>> model_output = model.generate( ... input_features=inputs["input_features"], ... attention_mask=inputs["attention_mask"], ... composer="composer1", ... ) >>> tokenizer_output = tokenizer.batch_decode( ... token_ids=model_output, feature_extractor_output=inputs ... )["pretty_midi_objects"] >>> # Since we now have 2 generated MIDI files >>> tokenizer_output[0].write("./Outputs/midi_output1.mid") >>> tokenizer_output[1].write("./Outputs/midi_output2.mid") ``` ## Pop2PianoConfig [[autodoc]] Pop2PianoConfig ## Pop2PianoFeatureExtractor [[autodoc]] Pop2PianoFeatureExtractor - __call__ ## Pop2PianoForConditionalGeneration [[autodoc]] Pop2PianoForConditionalGeneration - forward - generate ## Pop2PianoTokenizer [[autodoc]] Pop2PianoTokenizer - __call__ ## Pop2PianoProcessor [[autodoc]] Pop2PianoProcessor - __call__
transformers/docs/source/en/model_doc/pop2piano.md/0
{ "file_path": "transformers/docs/source/en/model_doc/pop2piano.md", "repo_id": "transformers", "token_count": 2624 }
19
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# ResNet

## Overview

The ResNet model was proposed in [Deep Residual Learning for Image Recognition](https://arxiv.org/abs/1512.03385) by Kaiming He, Xiangyu Zhang, Shaoqing Ren and Jian Sun. Our implementation follows the small changes made by [Nvidia](https://catalog.ngc.nvidia.com/orgs/nvidia/resources/resnet_50_v1_5_for_pytorch): the `stride=2` for downsampling is applied in the bottleneck's `3x3` convolution and not in the first `1x1` one. This variant is generally known as "ResNet v1.5".

ResNet introduced residual connections, which make it possible to train networks with a previously unseen number of layers (up to 1,000). ResNet won the 2015 ILSVRC & COCO competitions, an important milestone in deep computer vision.

The abstract from the paper is the following:

*Deeper neural networks are more difficult to train. We present a residual learning framework to ease the training of networks that are substantially deeper than those used previously. We explicitly reformulate the layers as learning residual functions with reference to the layer inputs, instead of learning unreferenced functions. We provide comprehensive empirical evidence showing that these residual networks are easier to optimize, and can gain accuracy from considerably increased depth. On the ImageNet dataset we evaluate residual nets with a depth of up to 152 layers---8x deeper than VGG nets but still having lower complexity. An ensemble of these residual nets achieves 3.57% error on the ImageNet test set. This result won the 1st place on the ILSVRC 2015 classification task. We also present analysis on CIFAR-10 with 100 and 1000 layers. The depth of representations is of central importance for many visual recognition tasks. Solely due to our extremely deep representations, we obtain a 28% relative improvement on the COCO object detection dataset. Deep residual nets are foundations of our submissions to ILSVRC & COCO 2015 competitions, where we also won the 1st places on the tasks of ImageNet detection, ImageNet localization, COCO detection, and COCO segmentation.*

The figure below illustrates the architecture of ResNet. Taken from the [original paper](https://arxiv.org/abs/1512.03385).

<img width="600" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/resnet_architecture.png"/>

This model was contributed by [Francesco](https://huggingface.co/Francesco). The TensorFlow version of this model was added by [amyeroberts](https://huggingface.co/amyeroberts). The original code can be found [here](https://github.com/KaimingHe/deep-residual-networks).

## Resources

A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with ResNet.
<PipelineTag pipeline="image-classification"/> - [`ResNetForImageClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb). - See also: [Image classification task guide](../tasks/image_classification) If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource. ## ResNetConfig [[autodoc]] ResNetConfig <frameworkcontent> <pt> ## ResNetModel [[autodoc]] ResNetModel - forward ## ResNetForImageClassification [[autodoc]] ResNetForImageClassification - forward </pt> <tf> ## TFResNetModel [[autodoc]] TFResNetModel - call ## TFResNetForImageClassification [[autodoc]] TFResNetForImageClassification - call </tf> <jax> ## FlaxResNetModel [[autodoc]] FlaxResNetModel - __call__ ## FlaxResNetForImageClassification [[autodoc]] FlaxResNetForImageClassification - __call__ </jax> </frameworkcontent>
transformers/docs/source/en/model_doc/resnet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/resnet.md", "repo_id": "transformers", "token_count": 1253 }
20
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Speech Encoder Decoder Models The [`SpeechEncoderDecoderModel`] can be used to initialize a speech-to-text model with any pretrained speech autoencoding model as the encoder (*e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert)) and any pretrained autoregressive model as the decoder. The effectiveness of initializing speech-sequence-to-text-sequence models with pretrained checkpoints for speech recognition and speech translation has *e.g.* been shown in [Large-Scale Self- and Semi-Supervised Learning for Speech Translation](https://arxiv.org/abs/2104.06678) by Changhan Wang, Anne Wu, Juan Pino, Alexei Baevski, Michael Auli, Alexis Conneau. An example of how to use a [`SpeechEncoderDecoderModel`] for inference can be seen in [Speech2Text2](speech_to_text_2). ## Randomly initializing `SpeechEncoderDecoderModel` from model configurations. [`SpeechEncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`Wav2Vec2Model`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder. ```python >>> from transformers import BertConfig, Wav2Vec2Config, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel >>> config_encoder = Wav2Vec2Config() >>> config_decoder = BertConfig() >>> config = SpeechEncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder) >>> model = SpeechEncoderDecoderModel(config=config) ``` ## Initialising `SpeechEncoderDecoderModel` from a pretrained encoder and a pretrained decoder. [`SpeechEncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained Transformer-based speech model, *e.g.* [Wav2Vec2](wav2vec2), [Hubert](hubert) can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder. Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized. Initializing [`SpeechEncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder). To do so, the `SpeechEncoderDecoderModel` class provides a [`SpeechEncoderDecoderModel.from_encoder_decoder_pretrained`] method. ```python >>> from transformers import SpeechEncoderDecoderModel >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained( ... "facebook/hubert-large-ll60k", "google-bert/bert-base-uncased" ... 
) ``` ## Loading an existing `SpeechEncoderDecoderModel` checkpoint and perform inference. To load fine-tuned checkpoints of the `SpeechEncoderDecoderModel` class, [`SpeechEncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers. To perform inference, one uses the [`generate`] method, which allows to autoregressively generate text. This method supports various forms of decoding, such as greedy, beam search and multinomial sampling. ```python >>> from transformers import Wav2Vec2Processor, SpeechEncoderDecoderModel >>> from datasets import load_dataset >>> import torch >>> # load a fine-tuned speech translation model and corresponding processor >>> model = SpeechEncoderDecoderModel.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15") >>> processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-xls-r-300m-en-to-15") >>> # let's perform inference on a piece of English speech (which we'll translate to German) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> input_values = processor(ds[0]["audio"]["array"], return_tensors="pt").input_values >>> # autoregressively generate transcription (uses greedy decoding by default) >>> generated_ids = model.generate(input_values) >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] >>> print(generated_text) Mr. Quilter ist der Apostel der Mittelschicht und wir freuen uns, sein Evangelium willkommen heißen zu können. ``` ## Training Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model on a dataset of (speech, text) pairs. As you can see, only 2 inputs are required for the model in order to compute a loss: `input_values` (which are the speech inputs) and `labels` (which are the `input_ids` of the encoded target sequence). ```python >>> from transformers import AutoTokenizer, AutoFeatureExtractor, SpeechEncoderDecoderModel >>> from datasets import load_dataset >>> encoder_id = "facebook/wav2vec2-base-960h" # acoustic model encoder >>> decoder_id = "google-bert/bert-base-uncased" # text decoder >>> feature_extractor = AutoFeatureExtractor.from_pretrained(encoder_id) >>> tokenizer = AutoTokenizer.from_pretrained(decoder_id) >>> # Combine pre-trained encoder and pre-trained decoder to form a Seq2Seq model >>> model = SpeechEncoderDecoderModel.from_encoder_decoder_pretrained(encoder_id, decoder_id) >>> model.config.decoder_start_token_id = tokenizer.cls_token_id >>> model.config.pad_token_id = tokenizer.pad_token_id >>> # load an audio input and pre-process (normalise mean/std to 0/1) >>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> input_values = feature_extractor(ds[0]["audio"]["array"], return_tensors="pt").input_values >>> # load its corresponding transcription and tokenize to generate labels >>> labels = tokenizer(ds[0]["text"], return_tensors="pt").input_ids >>> # the forward function automatically creates the correct decoder_input_ids >>> loss = model(input_values=input_values, labels=labels).loss >>> loss.backward() ``` ## SpeechEncoderDecoderConfig [[autodoc]] SpeechEncoderDecoderConfig ## SpeechEncoderDecoderModel [[autodoc]] SpeechEncoderDecoderModel - forward - from_encoder_decoder_pretrained ## FlaxSpeechEncoderDecoderModel [[autodoc]] FlaxSpeechEncoderDecoderModel - __call__ - from_encoder_decoder_pretrained
transformers/docs/source/en/model_doc/speech-encoder-decoder.md/0
{ "file_path": "transformers/docs/source/en/model_doc/speech-encoder-decoder.md", "repo_id": "transformers", "token_count": 2092 }
21
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # T5v1.1 ## Overview T5v1.1 was released in the [google-research/text-to-text-transfer-transformer](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511) repository by Colin Raffel et al. It's an improved version of the original T5 model. This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The original code can be found [here](https://github.com/google-research/text-to-text-transfer-transformer/blob/main/released_checkpoints.md#t511). ## Usage tips One can directly plug in the weights of T5v1.1 into a T5 model, like so: ```python >>> from transformers import T5ForConditionalGeneration >>> model = T5ForConditionalGeneration.from_pretrained("google/t5-v1_1-base") ``` T5 Version 1.1 includes the following improvements compared to the original T5 model: - GEGLU activation in the feed-forward hidden layer, rather than ReLU. See [this paper](https://arxiv.org/abs/2002.05202). - Dropout was turned off in pre-training (quality win). Dropout should be re-enabled during fine-tuning. - Pre-trained on C4 only without mixing in the downstream tasks. - No parameter sharing between the embedding and classifier layer. - "xl" and "xxl" replace "3B" and "11B". The model shapes are a bit different - larger `d_model` and smaller `num_heads` and `d_ff`. Note: T5 Version 1.1 was only pre-trained on [C4](https://huggingface.co/datasets/c4) excluding any supervised training. Therefore, this model has to be fine-tuned before it is usable on a downstream task, unlike the original T5 model. Since t5v1.1 was pre-trained unsupervisedly, there's no real advantage to using a task prefix during single-task fine-tuning. If you are doing multi-task fine-tuning, you should use a prefix. Google has released the following variants: - [google/t5-v1_1-small](https://huggingface.co/google/t5-v1_1-small) - [google/t5-v1_1-base](https://huggingface.co/google/t5-v1_1-base) - [google/t5-v1_1-large](https://huggingface.co/google/t5-v1_1-large) - [google/t5-v1_1-xl](https://huggingface.co/google/t5-v1_1-xl) - [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl). <Tip> Refer to [T5's documentation page](t5) for all API reference, tips, code examples and notebooks. </Tip>
transformers/docs/source/en/model_doc/t5v1.1.md/0
{ "file_path": "transformers/docs/source/en/model_doc/t5v1.1.md", "repo_id": "transformers", "token_count": 967 }
22
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # UniSpeech-SAT ## Overview The UniSpeech-SAT model was proposed in [UniSpeech-SAT: Universal Speech Representation Learning with Speaker Aware Pre-Training](https://arxiv.org/abs/2110.05752) by Sanyuan Chen, Yu Wu, Chengyi Wang, Zhengyang Chen, Zhuo Chen, Shujie Liu, Jian Wu, Yao Qian, Furu Wei, Jinyu Li, Xiangzhan Yu . The abstract from the paper is the following: *Self-supervised learning (SSL) is a long-standing goal for speech processing, since it utilizes large-scale unlabeled data and avoids extensive human labeling. Recent years witness great successes in applying self-supervised learning in speech recognition, while limited exploration was attempted in applying SSL for modeling speaker characteristics. In this paper, we aim to improve the existing SSL framework for speaker representation learning. Two methods are introduced for enhancing the unsupervised speaker information extraction. First, we apply the multi-task learning to the current SSL framework, where we integrate the utterance-wise contrastive loss with the SSL objective function. Second, for better speaker discrimination, we propose an utterance mixing strategy for data augmentation, where additional overlapped utterances are created unsupervisedly and incorporate during training. We integrate the proposed methods into the HuBERT framework. Experiment results on SUPERB benchmark show that the proposed system achieves state-of-the-art performance in universal representation learning, especially for speaker identification oriented tasks. An ablation study is performed verifying the efficacy of each proposed method. Finally, we scale up training dataset to 94 thousand hours public audio data and achieve further performance improvement in all SUPERB tasks.* This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The Authors' code can be found [here](https://github.com/microsoft/UniSpeech/tree/main/UniSpeech-SAT). ## Usage tips - UniSpeechSat is a speech model that accepts a float array corresponding to the raw waveform of the speech signal. Please use [`Wav2Vec2Processor`] for the feature extraction. - UniSpeechSat model can be fine-tuned using connectionist temporal classification (CTC) so the model output has to be decoded using [`Wav2Vec2CTCTokenizer`]. - UniSpeechSat performs especially well on speaker verification, speaker identification, and speaker diarization tasks. 
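Since speaker verification is one of the model's strongest use cases, the sketch below compares two utterances with [`UniSpeechSatForXVector`]. The `microsoft/unispeech-sat-base-plus-sv` checkpoint name and the similarity threshold are assumptions — adjust both for your own setup.

```python
>>> import torch
>>> from datasets import load_dataset
>>> from transformers import AutoFeatureExtractor, UniSpeechSatForXVector

>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/unispeech-sat-base-plus-sv")
>>> model = UniSpeechSatForXVector.from_pretrained("microsoft/unispeech-sat-base-plus-sv")

>>> # Extract speaker embeddings (x-vectors) for two utterances
>>> audio = [dataset[0]["audio"]["array"], dataset[1]["audio"]["array"]]
>>> inputs = feature_extractor(audio, sampling_rate=16000, padding=True, return_tensors="pt")
>>> with torch.no_grad():
...     embeddings = model(**inputs).embeddings

>>> # Cosine similarity between the (normalized) embeddings indicates whether the speakers match
>>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1)
>>> similarity = torch.nn.functional.cosine_similarity(embeddings[0], embeddings[1], dim=-1)
>>> print(f"speaker similarity: {similarity.item():.2f}")  # compare against a tuned threshold, e.g. ~0.85
```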
## Resources - [Audio classification task guide](../tasks/audio_classification) - [Automatic speech recognition task guide](../tasks/asr) ## UniSpeechSatConfig [[autodoc]] UniSpeechSatConfig ## UniSpeechSat specific outputs [[autodoc]] models.unispeech_sat.modeling_unispeech_sat.UniSpeechSatForPreTrainingOutput ## UniSpeechSatModel [[autodoc]] UniSpeechSatModel - forward ## UniSpeechSatForCTC [[autodoc]] UniSpeechSatForCTC - forward ## UniSpeechSatForSequenceClassification [[autodoc]] UniSpeechSatForSequenceClassification - forward ## UniSpeechSatForAudioFrameClassification [[autodoc]] UniSpeechSatForAudioFrameClassification - forward ## UniSpeechSatForXVector [[autodoc]] UniSpeechSatForXVector - forward ## UniSpeechSatForPreTraining [[autodoc]] UniSpeechSatForPreTraining - forward
transformers/docs/source/en/model_doc/unispeech-sat.md/0
{ "file_path": "transformers/docs/source/en/model_doc/unispeech-sat.md", "repo_id": "transformers", "token_count": 1045 }
23
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # ViTDet ## Overview The ViTDet model was proposed in [Exploring Plain Vision Transformer Backbones for Object Detection](https://arxiv.org/abs/2203.16527) by Yanghao Li, Hanzi Mao, Ross Girshick, Kaiming He. VitDet leverages the plain [Vision Transformer](vit) for the task of object detection. The abstract from the paper is the following: *We explore the plain, non-hierarchical Vision Transformer (ViT) as a backbone network for object detection. This design enables the original ViT architecture to be fine-tuned for object detection without needing to redesign a hierarchical backbone for pre-training. With minimal adaptations for fine-tuning, our plain-backbone detector can achieve competitive results. Surprisingly, we observe: (i) it is sufficient to build a simple feature pyramid from a single-scale feature map (without the common FPN design) and (ii) it is sufficient to use window attention (without shifting) aided with very few cross-window propagation blocks. With plain ViT backbones pre-trained as Masked Autoencoders (MAE), our detector, named ViTDet, can compete with the previous leading methods that were all based on hierarchical backbones, reaching up to 61.3 AP_box on the COCO dataset using only ImageNet-1K pre-training. We hope our study will draw attention to research on plain-backbone detectors.* This model was contributed by [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/facebookresearch/detectron2/tree/main/projects/ViTDet). Tips: - At the moment, only the backbone is available. ## VitDetConfig [[autodoc]] VitDetConfig ## VitDetModel [[autodoc]] VitDetModel - forward
transformers/docs/source/en/model_doc/vitdet.md/0
{ "file_path": "transformers/docs/source/en/model_doc/vitdet.md", "repo_id": "transformers", "token_count": 579 }
24
<!--Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# XLM-V

## Overview

XLM-V is a multilingual language model with a one million token vocabulary trained on 2.5TB of data from Common Crawl (same as XLM-R).
It was introduced in the [XLM-V: Overcoming the Vocabulary Bottleneck in Multilingual Masked Language Models](https://arxiv.org/abs/2301.10472) paper by Davis Liang, Hila Gonen, Yuning Mao, Rui Hou, Naman Goyal, Marjan Ghazvininejad, Luke Zettlemoyer and Madian Khabsa.

From the abstract of the XLM-V paper:

*Large multilingual language models typically rely on a single vocabulary shared across 100+ languages. As these models have increased in parameter count and depth, vocabulary size has remained largely unchanged. This vocabulary bottleneck limits the representational capabilities of multilingual models like XLM-R. In this paper, we introduce a new approach for scaling to very large multilingual vocabularies by de-emphasizing token sharing between languages with little lexical overlap and assigning vocabulary capacity to achieve sufficient coverage for each individual language. Tokenizations using our vocabulary are typically more semantically meaningful and shorter compared to XLM-R. Leveraging this improved vocabulary, we train XLM-V, a multilingual language model with a one million token vocabulary. XLM-V outperforms XLM-R on every task we tested on ranging from natural language inference (XNLI), question answering (MLQA, XQuAD, TyDiQA), and named entity recognition (WikiAnn) to low-resource tasks (Americas NLI, MasakhaNER).*

This model was contributed by [stefan-it](https://huggingface.co/stefan-it), who also ran detailed experiments with XLM-V on downstream tasks. The experiments repository can be found [here](https://github.com/stefan-it/xlm-v-experiments).

## Usage tips

- XLM-V is compatible with the XLM-RoBERTa model architecture; only the model weights from the [`fairseq`](https://github.com/facebookresearch/fairseq) library had to be converted.
- The `XLMTokenizer` implementation is used to load the vocab and perform tokenization.

An XLM-V (base size) model is available under the [`facebook/xlm-v-base`](https://huggingface.co/facebook/xlm-v-base) identifier.

<Tip>

The XLM-V architecture is the same as XLM-RoBERTa; refer to the [XLM-RoBERTa documentation](xlm-roberta) for API reference and examples.

</Tip>
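A minimal way to try the `facebook/xlm-v-base` checkpoint is the `fill-mask` pipeline, which resolves the correct mask token for you (the example sentence and the number of predictions shown below are arbitrary):

```python
>>> from transformers import pipeline

>>> fill_mask = pipeline("fill-mask", model="facebook/xlm-v-base")

>>> # Use the tokenizer's own mask token so the snippet stays tokenizer-agnostic
>>> text = f"Paris is the {fill_mask.tokenizer.mask_token} of France."
>>> predictions = fill_mask(text)
>>> print([prediction["token_str"] for prediction in predictions[:3]])
```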
transformers/docs/source/en/model_doc/xlm-v.md/0
{ "file_path": "transformers/docs/source/en/model_doc/xlm-v.md", "repo_id": "transformers", "token_count": 809 }
25
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Philosophy 🤗 Transformers is an opinionated library built for: - machine learning researchers and educators seeking to use, study or extend large-scale Transformers models. - hands-on practitioners who want to fine-tune those models or serve them in production, or both. - engineers who just want to download a pretrained model and use it to solve a given machine learning task. The library was designed with two strong goals in mind: 1. Be as easy and fast to use as possible: - We strongly limited the number of user-facing abstractions to learn, in fact, there are almost no abstractions, just three standard classes required to use each model: [configuration](main_classes/configuration), [models](main_classes/model), and a preprocessing class ([tokenizer](main_classes/tokenizer) for NLP, [image processor](main_classes/image_processor) for vision, [feature extractor](main_classes/feature_extractor) for audio, and [processor](main_classes/processors) for multimodal inputs). - All of these classes can be initialized in a simple and unified way from pretrained instances by using a common `from_pretrained()` method which downloads (if needed), caches and loads the related class instance and associated data (configurations' hyperparameters, tokenizers' vocabulary, and models' weights) from a pretrained checkpoint provided on [Hugging Face Hub](https://huggingface.co/models) or your own saved checkpoint. - On top of those three base classes, the library provides two APIs: [`pipeline`] for quickly using a model for inference on a given task and [`Trainer`] to quickly train or fine-tune a PyTorch model (all TensorFlow models are compatible with `Keras.fit`). - As a consequence, this library is NOT a modular toolbox of building blocks for neural nets. If you want to extend or build upon the library, just use regular Python, PyTorch, TensorFlow, Keras modules and inherit from the base classes of the library to reuse functionalities like model loading and saving. If you'd like to learn more about our coding philosophy for models, check out our [Repeat Yourself](https://huggingface.co/blog/transformers-design-philosophy) blog post. 2. Provide state-of-the-art models with performances as close as possible to the original models: - We provide at least one example for each architecture which reproduces a result provided by the official authors of said architecture. - The code is usually as close to the original code base as possible which means some PyTorch code may be not as *pytorchic* as it could be as a result of being converted TensorFlow code and vice versa. A few other goals: - Expose the models' internals as consistently as possible: - We give access, using a single API, to the full hidden-states and attention weights. 
- The preprocessing classes and base model APIs are standardized to easily switch between models. - Incorporate a subjective selection of promising tools for fine-tuning and investigating these models: - A simple and consistent way to add new tokens to the vocabulary and embeddings for fine-tuning. - Simple ways to mask and prune Transformer heads. - Easily switch between PyTorch, TensorFlow 2.0 and Flax, allowing training with one framework and inference with another. ## Main concepts The library is built around three types of classes for each model: - **Model classes** can be PyTorch models ([torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)), Keras models ([tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)) or JAX/Flax models ([flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html)) that work with the pretrained weights provided in the library. - **Configuration classes** store the hyperparameters required to build a model (such as the number of layers and hidden size). You don't always need to instantiate these yourself. In particular, if you are using a pretrained model without any modification, creating the model will automatically take care of instantiating the configuration (which is part of the model). - **Preprocessing classes** convert the raw data into a format accepted by the model. A [tokenizer](main_classes/tokenizer) stores the vocabulary for each model and provide methods for encoding and decoding strings in a list of token embedding indices to be fed to a model. [Image processors](main_classes/image_processor) preprocess vision inputs, [feature extractors](main_classes/feature_extractor) preprocess audio inputs, and a [processor](main_classes/processors) handles multimodal inputs. All these classes can be instantiated from pretrained instances, saved locally, and shared on the Hub with three methods: - `from_pretrained()` lets you instantiate a model, configuration, and preprocessing class from a pretrained version either provided by the library itself (the supported models can be found on the [Model Hub](https://huggingface.co/models)) or stored locally (or on a server) by the user. - `save_pretrained()` lets you save a model, configuration, and preprocessing class locally so that it can be reloaded using `from_pretrained()`. - `push_to_hub()` lets you share a model, configuration, and a preprocessing class to the Hub, so it is easily accessible to everyone.
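To make the three methods above concrete, here is a minimal sketch; the checkpoint name and the local directory are only illustrative:

```python
from transformers import AutoModel, AutoTokenizer

# Instantiate a preprocessing class and a model class from a checkpoint on the Hub
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModel.from_pretrained("distilbert-base-uncased")

# Save both locally so they can later be reloaded with from_pretrained()
tokenizer.save_pretrained("./my-finetuned-model")
model.save_pretrained("./my-finetuned-model")

# Optionally, share them on the Hub (requires being logged in to your Hugging Face account)
# tokenizer.push_to_hub("my-username/my-finetuned-model")
# model.push_to_hub("my-username/my-finetuned-model")
```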
transformers/docs/source/en/philosophy.md/0
{ "file_path": "transformers/docs/source/en/philosophy.md", "repo_id": "transformers", "token_count": 1518 }
26
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Optimum

The [Optimum](https://huggingface.co/docs/optimum/index) library supports quantization for Intel, Furiosa, ONNX Runtime, GPTQ, and lower-level PyTorch quantization functions. Consider using Optimum for quantization if you're targeting specific, optimized hardware such as Intel CPUs or Furiosa NPUs, or a model accelerator like ONNX Runtime.
transformers/docs/source/en/quantization/optimum.md/0
{ "file_path": "transformers/docs/source/en/quantization/optimum.md", "repo_id": "transformers", "token_count": 277 }
27
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.

-->

# Image Feature Extraction

[[open-in-colab]]

Image feature extraction is the task of extracting semantically meaningful features given an image. This has many use cases, including image similarity and image retrieval. Moreover, most computer vision models can be used for image feature extraction, where one can remove the task-specific head (image classification, object detection etc.) and get the features. These features are very useful on a higher level: edge detection, corner detection and so on. They may also contain information about the real world (e.g. what a cat looks like) depending on how deep the model is. Therefore, these outputs can be used to train new classifiers on a specific dataset.

In this guide, you will:

- Learn to build a simple image similarity system on top of the `image-feature-extraction` pipeline.
- Accomplish the same task with bare model inference.

## Image Similarity using `image-feature-extraction` Pipeline

We have two images of cats sitting on top of fish nets; one of them is generated.

```python
from PIL import Image
import requests

img_urls = ["https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.png", "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/cats.jpeg"]
image_real = Image.open(requests.get(img_urls[0], stream=True).raw).convert("RGB")
image_gen = Image.open(requests.get(img_urls[1], stream=True).raw).convert("RGB")
```

Let's see the pipeline in action. First, initialize the pipeline. If you don't pass any model to it, the pipeline will be automatically initialized with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224). If you'd like to calculate similarity, set `pool` to True.

```python
import torch
from transformers import pipeline
from accelerate.test_utils.testing import get_backend

# automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.)
DEVICE, _, _ = get_backend()
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-384", device=DEVICE, pool=True)
```

To infer with `pipe`, pass both images to it.

```python
outputs = pipe([image_real, image_gen])
```

The output contains pooled embeddings of those two images.

```python
# get the length of a single output
print(len(outputs[0][0]))
# show outputs
print(outputs)

# 768
# [[[-0.03909236937761307, 0.43381670117378235, -0.06913255900144577,
```

To get the similarity score, we need to pass them to a similarity function.
```python
from torch.nn.functional import cosine_similarity

similarity_score = cosine_similarity(torch.Tensor(outputs[0]),
                                     torch.Tensor(outputs[1]), dim=1)

print(similarity_score)

# tensor([0.6043])
```

If you want to get the last hidden states before pooling, avoid passing any value for the `pool` parameter, as it is set to `False` by default. These hidden states are useful for training new classifiers or models based on the features from the model.

```python
pipe = pipeline(task="image-feature-extraction", model="google/vit-base-patch16-224", device=DEVICE)
outputs = pipe(image_real)
```

Since the outputs are unpooled, we get the last hidden states, where the first dimension is the batch size, the second is the sequence length, and the last is the hidden size.

```python
import numpy as np
print(np.array(outputs).shape)
# (1, 197, 768)
```

## Getting Features and Similarities using `AutoModel`

We can also use the `AutoModel` class of Transformers to get the features. `AutoModel` loads any Transformers model with no task-specific head, and we can use this to get the features.

```python
from transformers import AutoImageProcessor, AutoModel

processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
model = AutoModel.from_pretrained("google/vit-base-patch16-224").to(DEVICE)
```

Let's write a simple function for inference. We will pass the inputs to the `processor` first and pass its outputs to the `model`.

```python
def infer(image):
    inputs = processor(image, return_tensors="pt").to(DEVICE)
    outputs = model(**inputs)
    return outputs.pooler_output
```

We can pass the images directly to this function and get the embeddings.

```python
embed_real = infer(image_real)
embed_gen = infer(image_gen)
```

We can get the similarity again over the embeddings.

```python
from torch.nn.functional import cosine_similarity

similarity_score = cosine_similarity(embed_real, embed_gen, dim=1)
print(similarity_score)

# tensor([0.6061], device='cuda:0', grad_fn=<SumBackward1>)
```
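Note that `pooler_output` is only available because the ViT checkpoint used here includes a pooling layer; not every vision backbone exposes one. If the checkpoint you pick doesn't, one common workaround is to pool the last hidden state yourself. The sketch below mean-pools over the patch (sequence) dimension and reuses the `processor`, `model`, `DEVICE`, and `cosine_similarity` objects defined above — treat it as one reasonable option rather than the only correct one.

```python
def infer_mean_pooled(image):
    inputs = processor(image, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        outputs = model(**inputs)
    # average over the sequence (patch) dimension: (batch, seq_len, hidden) -> (batch, hidden)
    return outputs.last_hidden_state.mean(dim=1)

embed_real = infer_mean_pooled(image_real)
embed_gen = infer_mean_pooled(image_gen)
print(cosine_similarity(embed_real, embed_gen, dim=1))
```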
transformers/docs/source/en/tasks/image_feature_extraction.md/0
{ "file_path": "transformers/docs/source/en/tasks/image_feature_extraction.md", "repo_id": "transformers", "token_count": 1562 }
28
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Text to speech [[open-in-colab]] Text-to-speech (TTS) is the task of creating natural-sounding speech from text, where the speech can be generated in multiple languages and for multiple speakers. Several text-to-speech models are currently available in 🤗 Transformers, such as [Bark](../model_doc/bark), [MMS](../model_doc/mms), [VITS](../model_doc/vits) and [SpeechT5](../model_doc/speecht5). You can easily generate audio using the `"text-to-audio"` pipeline (or its alias - `"text-to-speech"`). Some models, like Bark, can also be conditioned to generate non-verbal communications such as laughing, sighing and crying, or even add music. Here's an example of how you would use the `"text-to-speech"` pipeline with Bark: ```py >>> from transformers import pipeline >>> pipe = pipeline("text-to-speech", model="suno/bark-small") >>> text = "[clears throat] This is a test ... and I just took a long pause." >>> output = pipe(text) ``` Here's a code snippet you can use to listen to the resulting audio in a notebook: ```python >>> from IPython.display import Audio >>> Audio(output["audio"], rate=output["sampling_rate"]) ``` For more examples on what Bark and other pretrained TTS models can do, refer to our [Audio course](https://huggingface.co/learn/audio-course/chapter6/pre-trained_models). If you are looking to fine-tune a TTS model, the only text-to-speech models currently available in 🤗 Transformers are [SpeechT5](model_doc/speecht5) and [FastSpeech2Conformer](model_doc/fastspeech2_conformer), though more will be added in the future. SpeechT5 is pre-trained on a combination of speech-to-text and text-to-speech data, allowing it to learn a unified space of hidden representations shared by both text and speech. This means that the same pre-trained model can be fine-tuned for different tasks. Furthermore, SpeechT5 supports multiple speakers through x-vector speaker embeddings. The remainder of this guide illustrates how to: 1. Fine-tune [SpeechT5](../model_doc/speecht5) that was originally trained on English speech on the Dutch (`nl`) language subset of the [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) dataset. 2. Use your refined model for inference in one of two ways: using a pipeline or directly. Before you begin, make sure you have all the necessary libraries installed: ```bash pip install datasets soundfile speechbrain accelerate ``` Install 🤗Transformers from source as not all the SpeechT5 features have been merged into an official release yet: ```bash pip install git+https://github.com/huggingface/transformers.git ``` <Tip> To follow this guide you will need a GPU. 
If you're working in a notebook, run the following line to check if a GPU is available: ```bash !nvidia-smi ``` or alternatively for AMD GPUs: ```bash !rocm-smi ``` </Tip> We encourage you to log in to your Hugging Face account to upload and share your model with the community. When prompted, enter your token to log in: ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load the dataset [VoxPopuli](https://huggingface.co/datasets/facebook/voxpopuli) is a large-scale multilingual speech corpus consisting of data sourced from 2009-2020 European Parliament event recordings. It contains labelled audio-transcription data for 15 European languages. In this guide, we are using the Dutch language subset, feel free to pick another subset. Note that VoxPopuli or any other automated speech recognition (ASR) dataset may not be the most suitable option for training TTS models. The features that make it beneficial for ASR, such as excessive background noise, are typically undesirable in TTS. However, finding top-quality, multilingual, and multi-speaker TTS datasets can be quite challenging. Let's load the data: ```py >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("facebook/voxpopuli", "nl", split="train") >>> len(dataset) 20968 ``` 20968 examples should be sufficient for fine-tuning. SpeechT5 expects audio data to have a sampling rate of 16 kHz, so make sure the examples in the dataset meet this requirement: ```py dataset = dataset.cast_column("audio", Audio(sampling_rate=16000)) ``` ## Preprocess the data Let's begin by defining the model checkpoint to use and loading the appropriate processor: ```py >>> from transformers import SpeechT5Processor >>> checkpoint = "microsoft/speecht5_tts" >>> processor = SpeechT5Processor.from_pretrained(checkpoint) ``` ### Text cleanup for SpeechT5 tokenization Start by cleaning up the text data. You'll need the tokenizer part of the processor to process the text: ```py >>> tokenizer = processor.tokenizer ``` The dataset examples contain `raw_text` and `normalized_text` features. When deciding which feature to use as the text input, consider that the SpeechT5 tokenizer doesn't have any tokens for numbers. In `normalized_text` the numbers are written out as text. Thus, it is a better fit, and we recommend using `normalized_text` as input text. Because SpeechT5 was trained on the English language, it may not recognize certain characters in the Dutch dataset. If left as is, these characters will be converted to `<unk>` tokens. However, in Dutch, certain characters like `à` are used to stress syllables. In order to preserve the meaning of the text, we can replace this character with a regular `a`. To identify unsupported tokens, extract all unique characters in the dataset using the `SpeechT5Tokenizer` which works with characters as tokens. To do this, write the `extract_all_chars` mapping function that concatenates the transcriptions from all examples into one string and converts it to a set of characters. Make sure to set `batched=True` and `batch_size=-1` in `dataset.map()` so that all transcriptions are available at once for the mapping function. ```py >>> def extract_all_chars(batch): ... all_text = " ".join(batch["normalized_text"]) ... vocab = list(set(all_text)) ... return {"vocab": [vocab], "all_text": [all_text]} >>> vocabs = dataset.map( ... extract_all_chars, ... batched=True, ... batch_size=-1, ... keep_in_memory=True, ... remove_columns=dataset.column_names, ... 
) >>> dataset_vocab = set(vocabs["vocab"][0]) >>> tokenizer_vocab = {k for k, _ in tokenizer.get_vocab().items()} ``` Now you have two sets of characters: one with the vocabulary from the dataset and one with the vocabulary from the tokenizer. To identify any unsupported characters in the dataset, you can take the difference between these two sets. The resulting set will contain the characters that are in the dataset but not in the tokenizer. ```py >>> dataset_vocab - tokenizer_vocab {' ', 'à', 'ç', 'è', 'ë', 'í', 'ï', 'ö', 'ü'} ``` To handle the unsupported characters identified in the previous step, define a function that maps these characters to valid tokens. Note that spaces are already replaced by `▁` in the tokenizer and don't need to be handled separately. ```py >>> replacements = [ ... ("à", "a"), ... ("ç", "c"), ... ("è", "e"), ... ("ë", "e"), ... ("í", "i"), ... ("ï", "i"), ... ("ö", "o"), ... ("ü", "u"), ... ] >>> def cleanup_text(inputs): ... for src, dst in replacements: ... inputs["normalized_text"] = inputs["normalized_text"].replace(src, dst) ... return inputs >>> dataset = dataset.map(cleanup_text) ``` Now that you have dealt with special characters in the text, it's time to shift focus to the audio data. ### Speakers The VoxPopuli dataset includes speech from multiple speakers, but how many speakers are represented in the dataset? To determine this, we can count the number of unique speakers and the number of examples each speaker contributes to the dataset. With a total of 20,968 examples in the dataset, this information will give us a better understanding of the distribution of speakers and examples in the data. ```py >>> from collections import defaultdict >>> speaker_counts = defaultdict(int) >>> for speaker_id in dataset["speaker_id"]: ... speaker_counts[speaker_id] += 1 ``` By plotting a histogram you can get a sense of how much data there is for each speaker. ```py >>> import matplotlib.pyplot as plt >>> plt.figure() >>> plt.hist(speaker_counts.values(), bins=20) >>> plt.ylabel("Speakers") >>> plt.xlabel("Examples") >>> plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_speakers_histogram.png" alt="Speakers histogram"/> </div> The histogram reveals that approximately one-third of the speakers in the dataset have fewer than 100 examples, while around ten speakers have more than 500 examples. To improve training efficiency and balance the dataset, we can limit the data to speakers with between 100 and 400 examples. ```py >>> def select_speaker(speaker_id): ... return 100 <= speaker_counts[speaker_id] <= 400 >>> dataset = dataset.filter(select_speaker, input_columns=["speaker_id"]) ``` Let's check how many speakers remain: ```py >>> len(set(dataset["speaker_id"])) 42 ``` Let's see how many examples are left: ```py >>> len(dataset) 9973 ``` You are left with just under 10,000 examples from approximately 40 unique speakers, which should be sufficient. Note that some speakers with few examples may actually have more audio available if the examples are long. However, determining the total amount of audio for each speaker requires scanning through the entire dataset, which is a time-consuming process that involves loading and decoding each audio file. As such, we have chosen to skip this step here. ### Speaker embeddings To enable the TTS model to differentiate between multiple speakers, you'll need to create a speaker embedding for each example. 
The speaker embedding is an additional input into the model that captures a particular speaker's voice characteristics. To generate these speaker embeddings, use the pre-trained [spkrec-xvect-voxceleb](https://huggingface.co/speechbrain/spkrec-xvect-voxceleb) model from SpeechBrain. Create a function `create_speaker_embedding()` that takes an input audio waveform and outputs a 512-element vector containing the corresponding speaker embedding. ```py >>> import os >>> import torch >>> from speechbrain.inference.classifiers import EncoderClassifier >>> from accelerate.test_utils.testing import get_backend >>> spk_model_name = "speechbrain/spkrec-xvect-voxceleb" >>> device, _, _ = get_backend() # automatically detects the underlying device type (CUDA, CPU, XPU, MPS, etc.) >>> speaker_model = EncoderClassifier.from_hparams( ... source=spk_model_name, ... run_opts={"device": device}, ... savedir=os.path.join("/tmp", spk_model_name), ... ) >>> def create_speaker_embedding(waveform): ... with torch.no_grad(): ... speaker_embeddings = speaker_model.encode_batch(torch.tensor(waveform)) ... speaker_embeddings = torch.nn.functional.normalize(speaker_embeddings, dim=2) ... speaker_embeddings = speaker_embeddings.squeeze().cpu().numpy() ... return speaker_embeddings ``` It's important to note that the `speechbrain/spkrec-xvect-voxceleb` model was trained on English speech from the VoxCeleb dataset, whereas the training examples in this guide are in Dutch. While we believe that this model will still generate reasonable speaker embeddings for our Dutch dataset, this assumption may not hold true in all cases. For optimal results, we recommend training an X-vector model on the target speech first. This will ensure that the model is better able to capture the unique voice characteristics present in the Dutch language. ### Processing the dataset Finally, let's process the data into the format the model expects. Create a `prepare_dataset` function that takes in a single example and uses the `SpeechT5Processor` object to tokenize the input text and load the target audio into a log-mel spectrogram. It should also add the speaker embeddings as an additional input. ```py >>> def prepare_dataset(example): ... audio = example["audio"] ... example = processor( ... text=example["normalized_text"], ... audio_target=audio["array"], ... sampling_rate=audio["sampling_rate"], ... return_attention_mask=False, ... ) ... # strip off the batch dimension ... example["labels"] = example["labels"][0] ... # use SpeechBrain to obtain x-vector ... example["speaker_embeddings"] = create_speaker_embedding(audio["array"]) ... return example ``` Verify the processing is correct by looking at a single example: ```py >>> processed_example = prepare_dataset(dataset[0]) >>> list(processed_example.keys()) ['input_ids', 'labels', 'stop_labels', 'speaker_embeddings'] ``` Speaker embeddings should be a 512-element vector: ```py >>> processed_example["speaker_embeddings"].shape (512,) ``` The labels should be a log-mel spectrogram with 80 mel bins. 
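As a quick sanity check, you can also read the number of mel bins directly off the processed labels. The number of frames in the first dimension varies from example to example, so only the second dimension is checked here:

```py
>>> processed_example["labels"].shape[1]
80
```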
```py >>> import matplotlib.pyplot as plt >>> plt.figure() >>> plt.imshow(processed_example["labels"].T) >>> plt.show() ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_logmelspectrogram_1.png" alt="Log-mel spectrogram with 80 mel bins"/> </div> Side note: If you find this spectrogram confusing, it may be due to your familiarity with the convention of placing low frequencies at the bottom and high frequencies at the top of a plot. However, when plotting spectrograms as an image using the matplotlib library, the y-axis is flipped and the spectrograms appear upside down. Now apply the processing function to the entire dataset. This will take between 5 and 10 minutes. ```py >>> dataset = dataset.map(prepare_dataset, remove_columns=dataset.column_names) ``` You'll see a warning saying that some examples in the dataset are longer than the maximum input length the model can handle (600 tokens). Remove those examples from the dataset. Here we go even further and to allow for larger batch sizes we remove anything over 200 tokens. ```py >>> def is_not_too_long(input_ids): ... input_length = len(input_ids) ... return input_length < 200 >>> dataset = dataset.filter(is_not_too_long, input_columns=["input_ids"]) >>> len(dataset) 8259 ``` Next, create a basic train/test split: ```py >>> dataset = dataset.train_test_split(test_size=0.1) ``` ### Data collator In order to combine multiple examples into a batch, you need to define a custom data collator. This collator will pad shorter sequences with padding tokens, ensuring that all examples have the same length. For the spectrogram labels, the padded portions are replaced with the special value `-100`. This special value instructs the model to ignore that part of the spectrogram when calculating the spectrogram loss. ```py >>> from dataclasses import dataclass >>> from typing import Any, Dict, List, Union >>> @dataclass ... class TTSDataCollatorWithPadding: ... processor: Any ... def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: ... input_ids = [{"input_ids": feature["input_ids"]} for feature in features] ... label_features = [{"input_values": feature["labels"]} for feature in features] ... speaker_features = [feature["speaker_embeddings"] for feature in features] ... # collate the inputs and targets into a batch ... batch = processor.pad(input_ids=input_ids, labels=label_features, return_tensors="pt") ... # replace padding with -100 to ignore loss correctly ... batch["labels"] = batch["labels"].masked_fill(batch.decoder_attention_mask.unsqueeze(-1).ne(1), -100) ... # not used during fine-tuning ... del batch["decoder_attention_mask"] ... # round down target lengths to multiple of reduction factor ... if model.config.reduction_factor > 1: ... target_lengths = torch.tensor([len(feature["input_values"]) for feature in label_features]) ... target_lengths = target_lengths.new( ... [length - length % model.config.reduction_factor for length in target_lengths] ... ) ... max_length = max(target_lengths) ... batch["labels"] = batch["labels"][:, :max_length] ... # also add in the speaker embeddings ... batch["speaker_embeddings"] = torch.tensor(speaker_features) ... return batch ``` In SpeechT5, the input to the decoder part of the model is reduced by a factor 2. In other words, it throws away every other timestep from the target sequence. The decoder then predicts a sequence that is twice as long. 
Since the original target sequence length may be odd, the data collator makes sure to round the maximum length of the batch down to be a multiple of 2. ```py >>> data_collator = TTSDataCollatorWithPadding(processor=processor) ``` ## Train the model Load the pre-trained model from the same checkpoint as you used for loading the processor: ```py >>> from transformers import SpeechT5ForTextToSpeech >>> model = SpeechT5ForTextToSpeech.from_pretrained(checkpoint) ``` The `use_cache=True` option is incompatible with gradient checkpointing. Disable it for training. ```py >>> model.config.use_cache = False ``` Define the training arguments. Here we are not computing any evaluation metrics during the training process. Instead, we'll only look at the loss: ```python >>> from transformers import Seq2SeqTrainingArguments >>> training_args = Seq2SeqTrainingArguments( ... output_dir="speecht5_finetuned_voxpopuli_nl", # change to a repo name of your choice ... per_device_train_batch_size=4, ... gradient_accumulation_steps=8, ... learning_rate=1e-5, ... warmup_steps=500, ... max_steps=4000, ... gradient_checkpointing=True, ... fp16=True, ... eval_strategy="steps", ... per_device_eval_batch_size=2, ... save_steps=1000, ... eval_steps=1000, ... logging_steps=25, ... report_to=["tensorboard"], ... load_best_model_at_end=True, ... greater_is_better=False, ... label_names=["labels"], ... push_to_hub=True, ... ) ``` Instantiate the `Trainer` object and pass the model, dataset, and data collator to it. ```py >>> from transformers import Seq2SeqTrainer >>> trainer = Seq2SeqTrainer( ... args=training_args, ... model=model, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... data_collator=data_collator, ... processing_class=processor, ... ) ``` And with that, you're ready to start training! Training will take several hours. Depending on your GPU, it is possible that you will encounter a CUDA "out-of-memory" error when you start training. In this case, you can reduce the `per_device_train_batch_size` incrementally by factors of 2 and increase `gradient_accumulation_steps` by 2x to compensate. ```py >>> trainer.train() ``` To be able to use your checkpoint with a pipeline, make sure to save the processor with the checkpoint: ```py >>> processor.save_pretrained("YOUR_ACCOUNT_NAME/speecht5_finetuned_voxpopuli_nl") ``` Push the final model to the 🤗 Hub: ```py >>> trainer.push_to_hub() ``` ## Inference ### Inference with a pipeline Great, now that you've fine-tuned a model, you can use it for inference! First, let's see how you can use it with a corresponding pipeline. Let's create a `"text-to-speech"` pipeline with your checkpoint: ```py >>> from transformers import pipeline >>> pipe = pipeline("text-to-speech", model="YOUR_ACCOUNT_NAME/speecht5_finetuned_voxpopuli_nl") ``` Pick a piece of text in Dutch you'd like narrated, e.g.: ```py >>> text = "hallo allemaal, ik praat nederlands. groetjes aan iedereen!" ``` To use SpeechT5 with the pipeline, you'll need a speaker embedding. 
Let's get it from an example in the test dataset:

```py
>>> example = dataset["test"][304]
>>> speaker_embeddings = torch.tensor(example["speaker_embeddings"]).unsqueeze(0)
```

Now you can pass the text and speaker embeddings to the pipeline, and it will take care of the rest:

```py
>>> forward_params = {"speaker_embeddings": speaker_embeddings}
>>> output = pipe(text, forward_params=forward_params)
>>> output
{'audio': array([-6.82714235e-05, -4.26525949e-04, 1.06134125e-04, ...,
        -1.22392643e-03, -7.76011671e-04, 3.29112721e-04], dtype=float32),
 'sampling_rate': 16000}
```

You can then listen to the result:

```py
>>> from IPython.display import Audio
>>> Audio(output['audio'], rate=output['sampling_rate'])
```

### Run inference manually

You can achieve the same inference results without using the pipeline; however, more steps will be required.

Load the model from the 🤗 Hub:

```py
>>> model = SpeechT5ForTextToSpeech.from_pretrained("YOUR_ACCOUNT_NAME/speecht5_finetuned_voxpopuli_nl")
```

Pick an example from the test dataset to obtain a speaker embedding.

```py
>>> example = dataset["test"][304]
>>> speaker_embeddings = torch.tensor(example["speaker_embeddings"]).unsqueeze(0)
```

Define the input text and tokenize it.

```py
>>> text = "hallo allemaal, ik praat nederlands. groetjes aan iedereen!"
>>> inputs = processor(text=text, return_tensors="pt")
```

Create a spectrogram with your model:

```py
>>> spectrogram = model.generate_speech(inputs["input_ids"], speaker_embeddings)
```

Visualize the spectrogram, if you'd like to:

```py
>>> plt.figure()
>>> plt.imshow(spectrogram.T)
>>> plt.show()
```

<div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/tts_logmelspectrogram_2.png" alt="Generated log-mel spectrogram"/> </div>

Finally, load the HiFi-GAN vocoder used by SpeechT5 and turn the spectrogram into sound.

```py
>>> from transformers import SpeechT5HifiGan

>>> vocoder = SpeechT5HifiGan.from_pretrained("microsoft/speecht5_hifigan")
>>> with torch.no_grad():
...     speech = vocoder(spectrogram)

>>> from IPython.display import Audio
>>> Audio(speech.numpy(), rate=16000)
```

In our experience, obtaining satisfactory results from this model can be challenging. The quality of the speaker embeddings appears to be a significant factor. Since SpeechT5 was pre-trained with English x-vectors, it performs best when using English speaker embeddings. If the synthesized speech sounds poor, try using a different speaker embedding.

Increasing the training duration is also likely to enhance the quality of the results. Even so, the speech is clearly Dutch instead of English, and it does capture the voice characteristics of the speaker (compare to the original audio in the example). Another thing to experiment with is the model's configuration. For example, try using `config.reduction_factor = 1` to see if this improves the results.

Finally, it is essential to consider the ethical implications. Although TTS technology has numerous useful applications, it may also be used for malicious purposes, such as impersonating someone's voice without their knowledge or consent. Please use TTS judiciously and responsibly.
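As a final practical note, if you'd like to keep the generated audio rather than only playing it in a notebook, you can write it to disk with the `soundfile` library installed at the start of this guide (the file name below is arbitrary):

```py
>>> import soundfile as sf

>>> sf.write("speecht5_dutch_output.wav", speech.numpy(), samplerate=16000)
```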
transformers/docs/source/en/tasks/text-to-speech.md/0
{ "file_path": "transformers/docs/source/en/tasks/text-to-speech.md", "repo_id": "transformers", "token_count": 7299 }
29
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Glosario Este glosario define términos generales de aprendizaje automático y términos relacionados con 🤗 Transformers para ayudarte a comprender mejor la documentación. ## A ### attention mask La máscara de atención es un argumento opcional utilizado al agrupar secuencias. <Youtube id="M6adb1j2jPI"/> Este argumento indica al modelo qué tokens deben recibir atención y cuáles no. Por ejemplo, considera estas dos secuencias: ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence_a = "This is a short sequence." >>> sequence_b = "This is a rather long sequence. It is at least longer than the sequence A." >>> encoded_sequence_a = tokenizer(sequence_a)["input_ids"] >>> encoded_sequence_b = tokenizer(sequence_b)["input_ids"] ``` Las versiones codificadas tienen longitudes diferentes: ```python >>> len(encoded_sequence_a), len(encoded_sequence_b) (8, 19) ``` Por lo tanto, no podemos colocarlas juntas en el mismo tensor tal cual. La primera secuencia necesita ser rellenada hasta la longitud de la segunda, o la segunda necesita ser truncada hasta la longitud de la primera. En el primer caso, la lista de IDs se extenderá con los índices de relleno. Podemos pasar una lista al tokenizador y pedirle que realice el relleno de esta manera: ```python >>> padded_sequences = tokenizer([sequence_a, sequence_b], padding=True) ``` Podemos ver que se han agregado ceros a la derecha de la primera oración para que tenga la misma longitud que la segunda: ```python >>> padded_sequences["input_ids"] [[101, 1188, 1110, 170, 1603, 4954, 119, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [101, 1188, 1110, 170, 1897, 1263, 4954, 119, 1135, 1110, 1120, 1655, 2039, 1190, 1103, 4954, 138, 119, 102]] ``` Esto luego se puede convertir en un tensor en PyTorch o TensorFlow. La máscara de atención es un tensor binario que indica la posición de los índices de relleno para que el modelo no los tenga en cuenta. Para el [`BertTokenizer`], `1` indica un valor al que se debe prestar atención, mientras que `0` indica un valor de relleno. 
Esta máscara de atención está en el diccionario devuelto por el tokenizador bajo la clave "attention_mask": ```python >>> padded_sequences["attention_mask"] [[1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ``` ### autoencoding models Consulta [modelos de codificación](#encoder-models) y [modelado de lenguaje enmascarado](#masked-language-modeling-mlm) ### autoregressive models Consulta [modelado de lenguaje causal](#causal-language-modeling) y [modelos de decodificación](#decoder-models) ## B ### backbone La columna vertebral, backbone en inglés, es la red (embeddings y layers) que produce los estados ocultos o características crudas. Normalmente, está conectado a una [cabecera](#head), que acepta las características como entrada para hacer una predicción. Por ejemplo, [`ViTModel`] es una columna vertebral sin una cabecera específica encima. Otros modelos también pueden usar [`VitModel`] como columna vertebral, como por ejemplo [DPT](model_doc/dpt). ## C ### causal language modeling Una tarea de preentrenamiento donde el modelo lee los textos en orden y tiene que predecir la siguiente palabra. Generalmente, se realiza leyendo toda la oración, pero utilizando una máscara dentro del modelo para ocultar los tokens futuros en un cierto paso de tiempo. ### channel Las imágenes a color están compuestas por alguna combinación de valores en tres canales: rojo, verde y azul (RGB), y las imágenes en escala de grises solo tienen un canal. En 🤗 Transformers, el canal puede ser la primera o última dimensión del tensor de una imagen: [`n_channels`, `height`, `width`] o [`height`, `width`, `n_channels`]. ### connectionist temporal classification (CTC) Un algoritmo que permite que un modelo aprenda sin saber exactamente cómo están alineadas la entrada y la salida; CTC calcula la distribución de todas las salidas posibles para una entrada dada y elige la salida más probable de ella. CTC se utiliza comúnmente en tareas de reconocimiento de voz porque el habla no siempre se alinea perfectamente con la transcripción debido a diversas razones, como las diferentes velocidades de habla de los oradores. ### convolution Un tipo de capa en una red neuronal donde la matriz de entrada se multiplica elemento por elemento por una matriz más pequeña (núcleo o filtro) y los valores se suman en una nueva matriz. Esto se conoce como una operación de convolución que se repite sobre toda la matriz de entrada. Cada operación se aplica a un segmento diferente de la matriz de entrada. Las redes neuronales convolucionales (CNN) se utilizan comúnmente en visión por computadora. ## D ### DataParallel (DP) Técnica de paralelismo para entrenamiento en múltiples GPUs donde se replica la misma configuración varias veces, con cada instancia recibiendo una porción de datos única. El procesamiento se realiza en paralelo y todas las configuraciones se sincronizan al final de cada paso de entrenamiento. Obtén más información sobre cómo funciona el DataParallel [aquí](perf_train_gpu_many#dataparallel-vs-distributeddataparallel). ### decoder input IDs Esta entrada es específica para modelos codificador-decodificador y contiene los IDs de entrada que se enviarán al decodificador. Estas entradas deben usarse para tareas de secuencia a secuencia, como traducción o resumen, y generalmente se construyen de una manera específica para cada modelo. La mayoría de los modelos codificador-decodificador (BART, T5) crean sus `decoder_input_ids` por sí mismos a partir de las `labels`. 
En tales modelos, pasar las `labels` es la forma preferida de manejar el entrenamiento. Consulta la documentación de cada modelo para ver cómo manejan estos IDs de entrada para el entrenamiento de secuencia a secuencia. ### decoder models También conocidos como modelos autorregresivos, los modelos decodificadores involucran una tarea de preentrenamiento (llamada modelado de lenguaje causal) donde el modelo lee los textos en orden y tiene que predecir la siguiente palabra. Generalmente, se realiza leyendo la oración completa con una máscara para ocultar los tokens futuros en un cierto paso de tiempo. <Youtube id="d_ixlCubqQw"/> ### deep learning (DL) Algoritmos de aprendizaje automático que utilizan redes neuronales con varias capas. ## E ### encoder models También conocidos como modelos de codificación automática (autoencoding models), los modelos codificadores toman una entrada (como texto o imágenes) y las transforman en una representación numérica condensada llamada embedding. A menudo, los modelos codificadores se entrenan previamente utilizando técnicas como el [modelado de lenguaje enmascarado](#masked-language-modeling-mlm), que enmascara partes de la secuencia de entrada y obliga al modelo a crear representaciones más significativas. <Youtube id="H39Z_720T5s"/> ## F ### feature extraction El proceso de seleccionar y transformar datos crudos en un conjunto de características más informativas y útiles para algoritmos de aprendizaje automático. Algunos ejemplos de extracción de características incluyen transformar texto crudo en embeddings de palabras y extraer características importantes como bordes o formas de datos de imágenes/videos. ### feed forward chunking En cada bloque de atención residual en los transformadores, la capa de autoatención suele ir seguida de 2 capas de avance. El tamaño de embedding intermedio de las capas de avance suele ser mayor que el tamaño oculto del modelo (por ejemplo, para `google-bert/bert-base-uncased`). Para una entrada de tamaño `[batch_size, sequence_length]`, la memoria requerida para almacenar los embeddings intermedios de avance `[batch_size, sequence_length, config.intermediate_size]` puede representar una gran fracción del uso de memoria. Los autores de [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) observaron que, dado que el cálculo es independiente de la dimensión `sequence_length`, es matemáticamente equivalente calcular los embeddings de salida de ambas capas de avance `[batch_size, config.hidden_size]_0, ..., [batch_size, config.hidden_size]_n` individualmente y concatenarlos después a `[batch_size, sequence_length, config.hidden_size]` con `n = sequence_length`, lo que intercambia el aumento del tiempo de cálculo por una reducción en el uso de memoria, pero produce un resultado matemáticamente **equivalente**. Para modelos que utilizan la función [`apply_chunking_to_forward`], el `chunk_size` define el número de embeddings de salida que se calculan en paralelo y, por lo tanto, define el equilibrio entre la complejidad de memoria y tiempo. Si `chunk_size` se establece en 0, no se realiza ninguna fragmentación de avance. ### finetuned models El ajuste fino es una forma de transferencia de aprendizaje que implica tomar un modelo entrenado previamente, congelar sus pesos y reemplazar la capa de salida con una nueva [cabecera de modelo](#head) recién añadida. La cabecera del modelo se entrena en tu conjunto de datos objetivo. 
Consulta el tutorial [Ajustar finamente un modelo pre-entrenado](https://huggingface.co/docs/transformers/training) para obtener más detalles y aprende cómo ajustar finamente modelos con 🤗 Transformers. ## H ### head La cabecera del modelo se refiere a la última capa de una red neuronal que acepta los estados ocultos crudos y los proyecta en una dimensión diferente. Hay una cabecera de modelo diferente para cada tarea. Por ejemplo: * [`GPT2ForSequenceClassification`] es una cabecera de clasificación de secuencias, es decir, una capa lineal, encima del modelo base [`GPT2Model`]. * [`ViTForImageClassification`] es una cabecera de clasificación de imágenes, es decir, una capa lineal encima del estado oculto final del token `CLS`, encima del modelo base [`ViTModel`]. * [`Wav2Vec2ForCTC`] es una cabecera de modelado de lenguaje con [CTC](#connectionist-temporal-classification-ctc) encima del modelo base [`Wav2Vec2Model`]. ## I ### image patch Los modelos de Transformers basados en visión dividen una imagen en parches más pequeños que se incorporan linealmente y luego se pasan como una secuencia al modelo. Puedes encontrar el `patch_size` (o resolución del modelo) en su configuración. ### inference La inferencia es el proceso de evaluar un modelo en nuevos datos después de completar el entrenamiento. Consulta el tutorial [Pipeline for inference](https://huggingface.co/docs/transformers/pipeline_tutorial) para aprender cómo realizar inferencias con 🤗 Transformers. ### input IDs Los IDs de entrada a menudo son los únicos parámetros necesarios que se deben pasar al modelo como entrada. Son índices de tokens, representaciones numéricas de tokens que construyen las secuencias que se utilizarán como entrada por el modelo. <Youtube id="VFp38yj8h3A"/> Cada tokenizador funciona de manera diferente, pero el mecanismo subyacente sigue siendo el mismo. Aquí tienes un ejemplo utilizando el tokenizador BERT, que es un tokenizador [WordPiece](https://arxiv.org/pdf/1609.08144.pdf): ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence = "A Titan RTX has 24GB of VRAM" ``` El tokenizador se encarga de dividir la secuencia en tokens disponibles en el vocabulario del tokenizador. ```python >>> tokenized_sequence = tokenizer.tokenize(sequence) ``` Los tokens son palabras o sub palabras. Por ejemplo, "VRAM" no estaba en el vocabulario del modelo, así que se dividió en "V", "RA" y "M". Para indicar que estos tokens no son palabras separadas sino partes de la misma palabra, se añade un prefijo de doble almohadilla para "RA" y "M": ```python >>> print(tokenized_sequence) ['A', 'Titan', 'R', '##T', '##X', 'has', '24', '##GB', 'of', 'V', '##RA', '##M'] ``` Estos tokens luego se pueden convertir en IDs que son comprensibles por el modelo. Esto se puede hacer alimentando directamente la oración al tokenizador, que aprovecha la implementación en Rust de [🤗 Tokenizers](https://github.com/huggingface/tokenizers) para obtener un rendimiento óptimo. ```python >>> inputs = tokenizer(sequence) ``` El tokenizador devuelve un diccionario con todos los argumentos necesarios para que su modelo correspondiente funcione correctamente. 
Los índices de los tokens están bajo la clave `input_ids`: ```python >>> encoded_sequence = inputs["input_ids"] >>> print(encoded_sequence) [101, 138, 18696, 155, 1942, 3190, 1144, 1572, 13745, 1104, 159, 9664, 2107, 102] ``` Ten en cuenta que el tokenizador añade automáticamente "tokens especiales" (si el modelo asociado depende de ellos), que son IDs especiales que el modelo utiliza en ocasiones. Si descodificamos la secuencia anterior de IDs, ```python >>> decoded_sequence = tokenizer.decode(encoded_sequence) ``` Veremos ```python >>> print(decoded_sequence) [CLS] A Titan RTX has 24GB of VRAM [SEP] ``` Porque esta es la forma en que un [`BertModel`] espera sus entradas. ## L ### labels Las etiquetas son un argumento opcional que se puede pasar para que el modelo calcule la pérdida por sí mismo. Estas etiquetas deberían ser la predicción esperada del modelo: usará la pérdida estándar para calcular la pérdida entre sus predicciones y el valor esperado (la etiqueta). Estas etiquetas son diferentes según la cabecera del modelo, por ejemplo: - Para modelos de clasificación de secuencias ([`BertForSequenceClassification`]), el modelo espera un tensor de dimensión `(batch_size)` con cada valor del lote correspondiente a la etiqueta esperada de toda la secuencia. - Para modelos de clasificación de tokens ([`BertForTokenClassification`]), el modelo espera un tensor de dimensión `(batch_size, seq_length)` con cada valor correspondiente a la etiqueta esperada de cada token individual. - Para el modelado de lenguaje enmascarado ([`BertForMaskedLM`]), el modelo espera un tensor de dimensión `(batch_size, seq_length)` con cada valor correspondiente a la etiqueta esperada de cada token individual: las etiquetas son el ID del token enmascarado y los valores deben ignorarse para el resto (generalmente -100). - Para tareas de secuencia a secuencia ([`BartForConditionalGeneration`], [`MBartForConditionalGeneration`]), el modelo espera un tensor de dimensión `(batch_size, tgt_seq_length)` con cada valor correspondiente a las secuencias objetivo asociadas con cada secuencia de entrada. Durante el entrenamiento, tanto BART como T5 generarán internamente los `decoder_input_ids` y las máscaras de atención del decodificador. Por lo general, no es necesario suministrarlos. Esto no se aplica a los modelos que aprovechan el marco codificador-decodificador. - Para modelos de clasificación de imágenes ([`ViTForImageClassification`]), el modelo espera un tensor de dimensión `(batch_size)` con cada valor del lote correspondiente a la etiqueta esperada de cada imagen individual. - Para modelos de segmentación semántica ([`SegformerForSemanticSegmentation`]), el modelo espera un tensor de dimensión `(batch_size, height, width)` con cada valor del lote correspondiente a la etiqueta esperada de cada píxel individual. - Para modelos de detección de objetos ([`DetrForObjectDetection`]), el modelo espera una lista de diccionarios con claves `class_labels` y `boxes` donde cada valor del lote corresponde a la etiqueta esperada y el número de cajas delimitadoras de cada imagen individual. - Para modelos de reconocimiento automático de voz ([`Wav2Vec2ForCTC`]), el modelo espera un tensor de dimensión `(batch_size, target_length)` con cada valor correspondiente a la etiqueta esperada de cada token individual. <Tip> Las etiquetas de cada modelo pueden ser diferentes, así que asegúrate siempre de revisar la documentación de cada modelo para obtener más información sobre sus etiquetas específicas. 
</Tip> Los modelos base ([`BertModel`]) no aceptan etiquetas, ya que estos son los modelos base de transformadores, que simplemente generan características. ### large language models (LLM) Un término genérico que se refiere a modelos de lenguaje de transformadores (GPT-3, BLOOM, OPT) que fueron entrenados con una gran cantidad de datos. Estos modelos también tienden a tener un gran número de parámetros que se pueden aprender (por ejemplo, 175 mil millones para GPT-3). ## M ### masked language modeling (MLM) Una tarea de preentrenamiento en la que el modelo ve una versión corrupta de los textos, generalmente hecha al enmascarar algunos tokens al azar, y tiene que predecir el texto original. ### multimodal Una tarea que combina textos con otro tipo de entradas (por ejemplo: imágenes). ## N ### Natural language generation (NLG) Todas las tareas relacionadas con la generación de texto (por ejemplo: [Escribe con Transformers](https://transformer.huggingface.co/) o traducción). ### Natural language processing (NLP) Una forma genérica de decir "trabajar con textos". ### Natural language understanding (NLU) Todas las tareas relacionadas con entender lo que hay en un texto (por ejemplo: clasificar el texto completo o palabras individuales). ## P ### Pipeline Un pipeline en 🤗 Transformers es una abstracción que se refiere a una serie de pasos que se ejecutan en un orden específico para preprocesar y transformar datos y devolver una predicción de un modelo. Algunas etapas de ejemplo que se encuentran en un pipeline pueden ser el preprocesamiento de datos, la extracción de características y la normalización. Para obtener más detalles, consulta [Pipelines para inferencia](https://huggingface.co/docs/transformers/pipeline_tutorial). ### PipelineParallel (PP) Técnica de paralelismo en la que el modelo se divide verticalmente (a nivel de capa) en varios GPU, de modo que solo una o varias capas del modelo se colocan en un solo GPU. Cada GPU procesa en paralelo diferentes etapas del pipeline y trabaja en un pequeño fragmento del lote. Obtén más información sobre cómo funciona PipelineParallel [aquí](perf_train_gpu_many#from-naive-model-parallelism-to-pipeline-parallelism). ### pixel values Un tensor de las representaciones numéricas de una imagen que se pasa a un modelo. Los valores de píxeles tienen una forma de [`batch_size`, `num_channels`, `height`, `width`], y se generan a partir de un procesador de imágenes. ### pooling Una operación que reduce una matriz a una matriz más pequeña, ya sea tomando el máximo o el promedio de la dimensión (o dimensiones) agrupada(s). Las capas de agrupación se encuentran comúnmente entre capas convolucionales para reducir la representación de características. ### position IDs A diferencia de las RNN que tienen la posición de cada token incrustada en ellas, los transformers no son conscientes de la posición de cada token. Por lo tanto, se utilizan los IDs de posición (`position_ids`) para que el modelo identifique la posición de cada token en la lista de tokens. Son un parámetro opcional. Si no se pasan `position_ids` al modelo, los IDs se crean automáticamente como embeddings de posición absolutas. Los embeddings de posición absolutas se seleccionan en el rango `[0, config.max_position_embeddings - 1]`. Algunos modelos utilizan otros tipos de embeddings de posición, como embeddings de posición sinusoidales o embeddings de posición relativas. 
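Como ilustración, este es un esquema mínimo que construye `position_ids` explícitos equivalentes al comportamiento por defecto (el punto de control y la frase de ejemplo son solo ilustrativos):

```python
>>> import torch
>>> from transformers import BertModel, BertTokenizer

>>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased")
>>> model = BertModel.from_pretrained("google-bert/bert-base-cased")

>>> inputs = tokenizer("A Titan RTX has 24GB of VRAM", return_tensors="pt")
>>> # posiciones absolutas: 0, 1, 2, ..., longitud de la secuencia - 1
>>> position_ids = torch.arange(inputs["input_ids"].shape[1]).unsqueeze(0)
>>> outputs = model(**inputs, position_ids=position_ids)
```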
### preprocessing La tarea de preparar datos crudos en un formato que pueda ser fácilmente consumido por modelos de aprendizaje automático. Por ejemplo, el texto se preprocesa típicamente mediante la tokenización. Para tener una mejor idea de cómo es el preprocesamiento para otros tipos de entrada, consulta el tutorial [Pre-procesar](https://huggingface.co/docs/transformers/preprocessing). ### pretrained model Un modelo que ha sido pre-entrenado en algunos datos (por ejemplo, toda Wikipedia). Los métodos de preentrenamiento involucran un objetivo auto-supervisado, que puede ser leer el texto e intentar predecir la siguiente palabra (ver [modelado de lenguaje causal](#causal-language-modeling)) o enmascarar algunas palabras e intentar predecirlas (ver [modelado de lenguaje enmascarado](#masked-language-modeling-mlm)). Los modelos de habla y visión tienen sus propios objetivos de pre-entrenamiento. Por ejemplo, Wav2Vec2 es un modelo de habla pre-entrenado en una tarea contrastiva que requiere que el modelo identifique la representación de habla "verdadera" de un conjunto de representaciones de habla "falsas". Por otro lado, BEiT es un modelo de visión pre-entrenado en una tarea de modelado de imágenes enmascaradas que enmascara algunos de los parches de la imagen y requiere que el modelo prediga los parches enmascarados (similar al objetivo de modelado de lenguaje enmascarado). ## R ### recurrent neural network (RNN) Un tipo de modelo que utiliza un bucle sobre una capa para procesar textos. ### representation learning Un subcampo del aprendizaje automático que se centra en aprender representaciones significativas de datos en bruto. Algunos ejemplos de técnicas de aprendizaje de representaciones incluyen embeddings de palabras, auto-encoders y Redes Generativas Adversarias (Generative Adversarial Networks, GANs). ## S ### sampling rate Una medida en hercios del número de muestras (la señal de audio) tomadas por segundo. La tasa de muestreo es el resultado de aproximar una señal continua como el habla. ### self-attention Cada elemento de la entrada averigua a cuáles otros elementos de la entrada debe prestar atención. ### self-supervised learning Una categoría de técnicas de aprendizaje automático en la que un modelo crea su propio objetivo de aprendizaje a partir de datos no etiquetados. Difiere del [aprendizaje no supervisado](#unsupervised-learning) y del [aprendizaje supervisado](#supervised-learning) en que el proceso de aprendizaje está supervisado, pero no explícitamente por el usuario. Un ejemplo de aprendizaje auto-supervisado es el [modelado de lenguaje enmascarado](#masked-language-modeling-mlm), donde un modelo recibe oraciones con una proporción de sus tokens eliminados y aprende a predecir los tokens faltantes. ### semi-supervised learning Una amplia categoría de técnicas de entrenamiento de aprendizaje automático que aprovecha una pequeña cantidad de datos etiquetados con una mayor cantidad de datos no etiquetados para mejorar la precisión de un modelo, a diferencia del [aprendizaje supervisado](#supervised-learning) y del [aprendizaje no supervisado](#unsupervised-learning). Un ejemplo de un enfoque de aprendizaje semi-supervisado es "auto-entrenamiento", en el que un modelo se entrena con datos etiquetados y luego se utiliza para hacer predicciones sobre los datos no etiquetados. La porción de datos no etiquetados que el modelo predice con mayor confianza se agrega al conjunto de datos etiquetados y se utiliza para volver a entrenar el modelo. 
### sequence-to-sequence (seq2seq) Modelos que generan una nueva secuencia a partir de una entrada, como modelos de traducción o modelos de resumen (como [Bart](model_doc/bart) o [T5](model_doc/t5)). ### Sharded DDP Otro nombre para el concepto fundamental de [ZeRO](#zero-redundancy-optimizer-zero) utilizado por varias otras implementaciones de ZeRO. ### stride En [convolución](#convolution) o [agrupación](#pooling), el paso (stride) se refiere a la distancia que recorre el núcleo sobre una matriz. Un paso de 1 significa que el núcleo se mueve un píxel a la vez, y un paso de 2 significa que el núcleo se mueve dos píxeles a la vez. ### supervised learning Una forma de entrenamiento de modelos que utiliza directamente datos etiquetados para corregir y dirigir el rendimiento del modelo. Los datos se introducen en el modelo en entrenamiento, y sus predicciones se comparan con las etiquetas conocidas. El modelo actualiza sus pesos en función de cuán incorrectas fueron sus predicciones, y el proceso se repite para optimizar el rendimiento del modelo. ## T ### Tensor Parallelism (TP) Técnica de paralelismo para entrenamiento en múltiples GPU en la que cada tensor se divide en múltiples fragmentos, de modo que en lugar de tener todo el tensor en una sola GPU, cada fragmento del tensor reside en su GPU designada. Los fragmentos se procesan por separado y en paralelo en diferentes GPU y los resultados se sincronizan al final del paso de procesamiento.Esto es lo que a veces se llama paralelismo horizontal, ya que la división ocurre a nivel horizontal. Obtén más información sobre el Paralelismo de Tensores [aquí](perf_train_gpu_many#tensor-parallelism). ### token Parte de una oración, generalmente una palabra, pero también puede ser una sub-palabra (las palabras no comunes a menudo se dividen en sub-palabras) o un símbolo de puntuación. ### token Type IDs Algunos modelos tienen como objetivo realizar clasificación en pares de oraciones o responder preguntas. <Youtube id="0u3ioSwev3s"/> Estos requieren que dos secuencias diferentes se unan en una única entrada "input_ids", lo cual generalmente se realiza con la ayuda de tokens especiales, como el token de clasificación (`[CLS]`) y el token separador (`[SEP]`). Por ejemplo, el modelo BERT construye sus dos secuencias de entrada de la siguiente manera: ```python >>> # [CLS] SEQUENCE_A [SEP] SEQUENCE_B [SEP] ``` Podemos utilizar nuestro tokenizador para generar automáticamente una oración de este tipo al pasar las dos secuencias a `tokenizer` como dos argumentos (y no como una lista, como antes) de la siguiente manera: ```python >>> from transformers import BertTokenizer >>> tokenizer = BertTokenizer.from_pretrained("google-bert/bert-base-cased") >>> sequence_a = "HuggingFace is based in NYC" >>> sequence_b = "Where is HuggingFace based?" >>> encoded_dict = tokenizer(sequence_a, sequence_b) >>> decoded = tokenizer.decode(encoded_dict["input_ids"]) ``` Que devolverá: ```python >>> print(decoded) [CLS] HuggingFace is based in NYC [SEP] Where is HuggingFace based? [SEP] ``` Esto es suficiente para que algunos modelos comprendan dónde termina una secuencia y comienza otra. Sin embargo, otros modelos, como BERT, también utilizan identificadores de tipo de token (también llamados identificadores de segmento). Se representan como una máscara binaria que identifica los dos tipos de secuencia en el modelo. 
El tokenizador devuelve esta máscara como la entrada "token_type_ids": ```python >>> encoded_dict["token_type_ids"] [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` La primera secuencia, el "contexto" utilizado para la pregunta, tiene todos sus tokens representados por un `0`, mientras que la segunda secuencia, correspondiente a la "pregunta", tiene todos sus tokens representados por un `1`. Algunos modelos, como [`XLNetModel`], utilizan un token adicional representado por un `2`. ### transfer learning Una técnica que implica tomar un modelo pre-entrenado y adaptarlo a un conjunto de datos específico para tu tarea. En lugar de entrenar un modelo desde cero, puedes aprovechar el conocimiento obtenido de un modelo existente como punto de partida. Esto acelera el proceso de aprendizaje y reduce la cantidad de datos de entrenamiento necesarios. ### transformer Arquitectura de modelo de aprendizaje profundo basada en auto-atención (Self-attention). ## U ### unsupervised learning Una forma de entrenamiento de modelos en la que los datos proporcionados al modelo no están etiquetados. Las técnicas de aprendizaje no supervisado aprovechan la información estadística de la distribución de datos para encontrar patrones útiles para la tarea en cuestión. ## Z ### Zero Redundancy Optimizer (ZeRO) Técnica de paralelismo que realiza la fragmentación de los tensores de manera algo similar a [TensorParallel](#tensor-parallelism-tp), excepto que todo el tensor se reconstruye a tiempo para una computación hacia adelante o hacia atrás, por lo tanto, el modelo no necesita ser modificado. Este método también admite diversas técnicas de descarga para compensar la memoria limitada de la GPU. Obtén más información sobre ZeRO [aquí](perf_train_gpu_many#zero-data-parallelism).
transformers/docs/source/es/glossary.md/0
{ "file_path": "transformers/docs/source/es/glossary.md", "repo_id": "transformers", "token_count": 10085 }
30
- sections: - local: index title: 🤗 Transformers - local: quicktour title: Visite rapide - local: installation title: Installation title: Démarrer - sections: - local: tutoriel_pipeline title: Pipelines pour l'inférence - local: autoclass_tutorial title: Chargement d'instances pré-entraînées avec une AutoClass - local: in_translation title: Préparation des données - local: in_translation title: Fine-tune un modèle pré-entraîné - local: run_scripts_fr title: Entraînement avec un script - local: in_translation title: Entraînement distribué avec 🤗 Accelerate - local: in_translation title: Chargement et entraînement des adaptateurs avec 🤗 PEFT - local: in_translation title: Partager un modèle - local: in_translation title: Agents - local: in_translation title: Génération avec LLMs title: Tutoriels - sections: - local: task_summary title: Ce que 🤗 Transformers peut faire - local: tasks_explained title: Comment 🤗 Transformers résout ces tâches title: Guides conceptuels
transformers/docs/source/fr/_toctree.yml/0
{ "file_path": "transformers/docs/source/fr/_toctree.yml", "repo_id": "transformers", "token_count": 391 }
31
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Allenamento distribuito con 🤗 Accelerate La parallelizzazione è emersa come strategia per allenare modelli sempre più grandi su hardware limitato e accelerarne la velocità di allenamento di diversi ordini di magnitudine. In Hugging Face, abbiamo creato la libreria [🤗 Accelerate](https://huggingface.co/docs/accelerate) per aiutarti ad allenare in modo semplice un modello 🤗 Transformers su qualsiasi tipo di configurazione distribuita, sia che si tratti di più GPU su una sola macchina o di più GPU su più macchine. In questo tutorial, imparerai come personalizzare il training loop nativo di PyTorch per consentire l'addestramento in un ambiente distribuito. ## Configurazione Inizia installando 🤗 Accelerate: ```bash pip install accelerate ``` Poi importa e crea un oggetto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). `Accelerator` rileverà automaticamente il tuo setup distribuito e inizializzerà tutte le componenti necessarie per l'allenamento. Non dovrai allocare esplicitamente il tuo modello su un device. ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Preparati ad accelerare Il prossimo passo è quello di passare tutti gli oggetti rilevanti per l'allenamento al metodo [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Questo include i tuoi DataLoaders per l'allenamento e per la valutazione, un modello e un ottimizzatore: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward Infine, sostituisci il tipico metodo `loss.backward()` nel tuo loop di allenamento con il metodo [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) di 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` Come puoi vedere nel seguente codice, hai solo bisogno di aggiungere quattro righe in più di codice al tuo training loop per abilitare l'allenamento distribuito! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## Allenamento Una volta che hai aggiunto le righe di codice rilevanti, lancia il tuo allenamento in uno script o in un notebook come Colaboratory. ### Allenamento con uno script Se stai eseguendo il tuo allenamento da uno script, esegui il comando seguente per creare e salvare un file di configurazione: ```bash accelerate config ``` Poi lancia il tuo allenamento con: ```bash accelerate launch train.py ``` ### Allenamento con un notebook La libreria 🤗 Accelerate può anche essere utilizzata in un notebook se stai pianificando di utilizzare le TPU di Colaboratory. Inserisci tutto il codice legato all'allenamento in una funzione, e passala al `notebook_launcher`: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` Per maggiori informazioni relative a 🤗 Accelerate e le sue numerose funzionalità, fai riferimento alla [documentazione](https://huggingface.co/docs/accelerate).
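Beyond the training loop itself, the same `Accelerator` object is typically reused at evaluation time. The snippet below is an illustrative sketch rather than part of the original guide: it assumes the `model`, `eval_dataloader`, and `accelerator` objects prepared above, and only uses standard Accelerate APIs (`gather`, `print`) to collect predictions from every process.

```py
import torch

model.eval()
all_predictions = []
for batch in eval_dataloader:
    with torch.no_grad():
        outputs = model(**batch)
    predictions = outputs.logits.argmax(dim=-1)
    # gather() collects the tensor from every process so metrics see the whole dataset
    all_predictions.append(accelerator.gather(predictions))

all_predictions = torch.cat(all_predictions)
# accelerator.print only prints on the main process, avoiding duplicated output
accelerator.print(f"collected {all_predictions.shape[0]} predictions")
```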
transformers/docs/source/it/accelerate.md/0
{ "file_path": "transformers/docs/source/it/accelerate.md", "repo_id": "transformers", "token_count": 1891 }
32
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Inferenza Efficiente su CPU Questa guida si concentra sull'inferenza di modelli di grandi dimensioni in modo efficiente sulla CPU. ## `BetterTransformer` per inferenza più rapida Abbiamo integrato di recente `BetterTransformer` per fare inferenza più rapidamente con modelli per testi, immagini e audio. Visualizza la documentazione sull'integrazione [qui](https://huggingface.co/docs/optimum/bettertransformer/overview) per maggiori dettagli. ## PyTorch JIT-mode (TorchScript) TorchScript è un modo di creare modelli serializzabili e ottimizzabili a partire da codice PyTorch. Ogni programma TorchScript può essere salvato da un processo Python e caricato in un processo dove non ci sono dipendenze Python. Rispetto all'eager mode di default, il jit mode in PyTorch fornisce normalmente prestazioni migliori per l'inferenza del modello grazie a metodologie di ottimizzazione come la operator fusion. Per una prima introduzione a TorchScript, vedi la Introduction to [PyTorch TorchScript tutorial](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules). ### IPEX Graph Optimization con JIT-mode Intel® Extension per PyTorch fornisce ulteriori ottimizzazioni in jit mode per i modelli della serie Transformers. Consigliamo vivamente agli utenti di usufruire dei vantaggi di Intel® Extension per PyTorch con jit mode. Alcuni operator patterns usati frequentemente dai modelli Transformers sono già supportati in Intel® Extension per PyTorch con jit mode fusions. Questi fusion patterns, come Multi-head-attention fusion, Concat Linear, Linear+Add, Linear+Gelu, Add+LayerNorm fusion ecc., sono abilitati e hanno buone performance. I benefici della fusion sono forniti agli utenti in modo trasparente. In base alle analisi, circa il 70% dei problemi più popolari in NLP (question-answering, text-classification e token-classification) può avere benefici sulle performance grazie ai fusion patterns, sia per la precisione Float32 che per la mixed precision BFloat16. Vedi maggiori informazioni su [IPEX Graph Optimization](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html). #### Installazione di IPEX I rilasci di IPEX seguono PyTorch; verifica i vari approcci in [IPEX installation](https://intel.github.io/intel-extension-for-pytorch/). ### Utilizzo del JIT-mode Per abilitare il JIT-mode in Trainer per evaluation e prediction, devi aggiungere `jit_mode_eval` negli argomenti di Trainer. <Tip warning={true}> Per PyTorch >= 1.14.0, il JIT-mode potrebbe giovare a qualsiasi modello in prediction e evaluation, visto che il dict input è supportato in jit.trace. Per PyTorch < 1.14.0, il JIT-mode potrebbe giovare ai modelli il cui ordine dei parametri corrisponde all'ordine delle tuple in ingresso in jit.trace, come i modelli per question-answering. 
Nel caso in cui l'ordine dei parametri non corrisponda all'ordine delle tuple in ingresso in jit.trace, come nei modelli di text-classification, jit.trace fallirà e lo cattureremo con un'eccezione per eseguire un fallback. Il logging è usato per notificare gli utenti. </Tip> Trovi un esempio con caso d'uso in [Transformers question-answering](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) - Inference using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - Inference with IPEX using jit mode on CPU: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
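The same switches shown in the command lines above can also be set programmatically. The sketch below is not taken from the original page: `jit_mode_eval` and `use_ipex` are existing `TrainingArguments` options, while the evaluation dataset is assumed to be prepared elsewhere.

```py
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, Trainer, TrainingArguments

checkpoint = "csarron/bert-base-uncased-squad-v1"
model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

args = TrainingArguments(
    output_dir="/tmp/qa-jit",
    jit_mode_eval=True,  # programmatic equivalent of the --jit_mode_eval flag
    use_ipex=False,      # set to True when Intel Extension for PyTorch is installed
)

trainer = Trainer(model=model, args=args)
# predictions = trainer.predict(eval_dataset)  # `eval_dataset` is assumed to exist
```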
transformers/docs/source/it/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/it/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 1497 }
33
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 🤗 Accelerate を用いた分散学習 モデルが大きくなるにつれて、限られたハードウェアでより大きなモデルを訓練し、訓練速度を大幅に上昇させるための方法として並列処理が浮上してきました。1台のマシンに複数のGPUがあっても、複数のマシンにまたがる複数のGPUがあっても、あらゆるタイプの分散処理セットアップ上でユーザーが簡単に 🤗 Transformers モデルを訓練できるように、 Hugging Face では [🤗 Accelerate](https://huggingface.co/docs/accelerate) ライブラリを作成しました。このチュートリアルでは、PyTorch の訓練ループをカスタマイズして、分散処理環境での訓練を可能にする方法について学びます。 ## セットアップ はじめに 🤗 Accelerate をインストールしましょう: ```bash pip install accelerate ``` そしたらインポートして [`~accelerate.Accelerator`] オブジェクトを作成しましょう。[`~accelerate.Accelerator`] は分散処理セットアップを自動的に検出し、訓練のために必要な全てのコンポーネントを初期化します。モデルをデバイスに明示的に配置する必要はありません。 ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Accelerate する準備をしましょう 次に、関連する全ての訓練オブジェクトを [`~accelerate.Accelerator.prepare`] メソッドに渡します。これには、訓練と評価それぞれのDataloader、モデル、optimizer が含まれます: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward 最後に訓練ループ内の `loss.backward()` を 🤗 Accelerate の [`~accelerate.Accelerator.backward`] メソッドで置き換えます: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` 以下のコードで確認できる通り、訓練ループに4行のコードを追加するだけで分散学習が可能です! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## 訓練する 関連するコードを追加したら、スクリプトまたは Colaboratory などのノートブックで訓練を開始します。 ### スクリプトで訓練する スクリプトから訓練をしている場合は、設定ファイルを作成・保存するために以下のコマンドを実行してください: ```bash accelerate config ``` そして次のようにして訓練を開始します: ```bash accelerate launch train.py ``` ### ノートブックで訓練する Colaboratory の TPU の利用をお考えの場合、🤗 Accelerate はノートブック上で実行することもできます。訓練に必要な全てのコードを関数に含め、[`~accelerate.notebook_launcher`] に渡してください: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` 🤗 Accelerate と豊富な機能についてもっと知りたい方は[ドキュメント](https://huggingface.co/docs/accelerate)を参照してください。
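A common follow-up once training finishes is saving the model from a distributed run. The snippet below is an illustrative sketch rather than part of the original tutorial; it reuses the `accelerator` and `model` objects from above and relies only on standard Accelerate APIs (`wait_for_everyone`, `unwrap_model`, `save`). The output directory name is an example.

```py
# Make sure every process has finished its last step before saving
accelerator.wait_for_everyone()

# Strip the distributed wrapper added by accelerator.prepare()
unwrapped_model = accelerator.unwrap_model(model)

# Only the main process writes files; accelerator.save is used as the save function
unwrapped_model.save_pretrained(
    "my_finetuned_model",
    is_main_process=accelerator.is_main_process,
    save_function=accelerator.save,
)
```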
transformers/docs/source/ja/accelerate.md/0
{ "file_path": "transformers/docs/source/ja/accelerate.md", "repo_id": "transformers", "token_count": 2185 }
34
<!--- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # インストール 使用しているDeep Learningライブラリに対して、🤗 Transformersをインストールしてキャッシュを設定、そしてオプションでオフラインで実行できるように 🤗 Transformersを設定します。 🤗 TransformersはPython 3.6+, PyTorch 1.1.0+, TensorFlow 2.0+, Flaxで動作確認しています。 使用しているDeep Learningライブラリに合わせて、以下のインストール方法に従ってください: * [PyTorch](https://pytorch.org/get-started/locally/)のインストール手順。 * [TensorFlow 2.0](https://www.tensorflow.org/install/pip)のインストール手順。 * [Flax](https://flax.readthedocs.io/en/latest/)のインストール手順。 ## pipでのインストール 🤗 Transformersを[仮想環境](https://docs.python.org/3/library/venv.html)にインストールする必要があります。 もし、Pythonの仮想環境に馴染みがない場合は、この[ガイド](https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/)をご覧ください。仮想環境によって異なるプロジェクトの管理がより簡単になり、依存関係間の互換性の問題を回避できます。 まず、プロジェクトディレクトリに仮想環境を作成することから始めましょう: ```bash python -m venv .env ``` 仮想環境を起動しましょう。LinuxとMacOsの場合は以下のコマンドで起動します: ```bash source .env/bin/activate ``` Windowsで仮想環境を起動します ```bash .env/Scripts/activate ``` これで、次のコマンドで🤗 Transformersをインストールする準備が整いました: ```bash pip install transformers ``` CPU対応のみ必要な場合、🤗 TransformersとDeep Learningライブラリを1行でインストールできるようになっていて便利です。例えば、🤗 TransformersとPyTorchを以下のように一緒にインストールできます: ```bash pip install transformers[torch] ``` 🤗 TransformersとTensorFlow 2.0: ```bash pip install transformers[tf-cpu] ``` 🤗 TransformersとFlax: ```bash pip install transformers[flax] ``` 最後に、以下のコマンドを実行することで🤗 Transformersが正しくインストールされているかを確認します。学習済みモデルがダウンロードされます: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('we love you'))" ``` その後、ラベルとスコアが出力されます: ```bash [{'label': 'POSITIVE', 'score': 0.9998704791069031}] ``` ## ソースからのインストール 以下のコマンドでソースから🤗 Transformersをインストールします: ```bash pip install git+https://github.com/huggingface/transformers ``` このコマンドは最新の安定版ではなく、開発における最新の`main`バージョンをインストールします。`main`バージョンは最新の開発状況に対応するのに便利です。例えば、最後の公式リリース以降にバグが修正されたが、新しいリリースがまだ展開されていない場合などです。しかし、これは`main`バージョンが常に安定しているとは限らないことを意味します。私たちは`main`バージョンの運用を維持するよう努め、ほとんどの問題は通常、数時間から1日以内に解決されます。もし問題に遭遇した場合は、より早く修正できるように[Issue](https://github.com/huggingface/transformers/issues)を作成してください! 以下のコマンドを実行して、🤗 Transformersが正しくインストールされているかどうかを確認します: ```bash python -c "from transformers import pipeline; print(pipeline('sentiment-analysis')('I love you'))" ``` ## 編集可能なインストール 必要に応じて、編集可能なインストールをします: * ソースコードの`main`バージョンを使います。 * 🤗 Transformersにコントリビュートし、コードの変更をテストする必要があります。 以下のコマンドでレポジトリをクローンして、🤗 Transformersをインストールします: ```bash git clone https://github.com/huggingface/transformers.git cd transformers pip install -e . ``` 上記のコマンドは、レポジトリをクローンしたフォルダとPythonのライブラリをパスをリンクします。Pythonは通常のライブラリパスに加えて、あなたがクローンしたフォルダの中も見るようになります。例えば、Pythonパッケージが通常、`~/anaconda3/envs/main/lib/python3.7/site-packages/`にインストールされている場合、Pythonはクローンしたフォルダも検索するようになります: `~/transformers/`. 
<Tip warning={true}> ライブラリーを使い続けたい場合は、transformersフォルダーを保持しつづける必要があります。 </Tip> これで、次のコマンドで簡単にクローンを🤗 Transformersの最新版に更新できます: ```bash cd ~/transformers/ git pull ``` Python環境は次回の実行時に🤗 Transformersの`main`バージョンを見つけるようになります。 ## condaでのインストール `conda-forge`のcondaチャンネルからインストールします: ```bash conda install conda-forge::transformers ``` ## キャッシュの設定 学習済みモデルはダウンロードされ、ローカルにキャッシュされます: `~/.cache/huggingface/hub`. これはシェル環境変数`TRANSFORMERS_CACHE`で指定されるデフォルトのディレクトリです。Windowsでは、デフォルトのディレクトリは`C:\Users\username\.cache\huggingface\hub`になっています。異なるキャッシュディレクトリを指定するために、以下のシェル環境変数を変更することが可能です。優先度は以下の順番に対応します: 1. シェル環境変数 (デフォルト): `HF_HUB_CACHE` または `TRANSFORMERS_CACHE`. 2. シェル環境変数: `HF_HOME`. 3. シェル環境変数: `XDG_CACHE_HOME` + `/huggingface`. <Tip> もし、以前のバージョンのライブラリを使用していた人で、`PYTORCH_TRANSFORMERS_CACHE`または`PYTORCH_PRETRAINED_BERT_CACHE`を設定していた場合、シェル環境変数`TRANSFORMERS_CACHE`を指定しない限り🤗 Transformersはこれらのシェル環境変数を使用します。 </Tip> ## オフラインモード 🤗 Transformersはローカルファイルのみを使用することでファイアウォールやオフラインの環境でも動作させることができます。この動作を有効にするためには、環境変数`HF_HUB_OFFLINE=1`を設定します。 <Tip> 環境変数`HF_DATASETS_OFFLINE=1`を設定し、オフライントレーニングワークフローに[🤗 Datasets](https://huggingface.co/docs/datasets/)を追加します。 </Tip> 例えば、外部インスタンスに対してファイアウォールで保護された通常のネットワーク上でプログラムを実行する場合、通常以下のようなコマンドで実行することになります: ```bash python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` オフラインインスタンスでこの同じプログラムを実行します: ```bash HF_DATASETS_OFFLINE=1 HF_HUB_OFFLINE=1 \ python examples/pytorch/translation/run_translation.py --model_name_or_path google-t5/t5-small --dataset_name wmt16 --dataset_config ro-en ... ``` このスクリプトは、ローカルファイルのみを検索することが分かっているので、ハングアップしたりタイムアウトを待ったりすることなく実行されるはずです。 ### オフラインで使用するためにモデルやトークナイザーを取得する オフラインで🤗 Transformersを使用するもう1つの方法は、前もってファイルをダウンロードしておき、オフラインで使用する必要があるときにそのローカルパスを指定することです。これには3つの方法があります: * [Model Hub](https://huggingface.co/models)のユーザーインターフェース上から↓アイコンをクリックしてファイルをダウンロードする方法。 ![download-icon](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/download-icon.png) * [`PreTrainedModel.from_pretrained`]および[`PreTrainedModel.save_pretrained`]のワークフローを使用する方法: 1. [`PreTrainedModel.from_pretrained`]で前もってファイルをダウンロードします: ```py >>> from transformers import AutoTokenizer, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("bigscience/T0_3B") >>> model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/T0_3B") ``` 2. [`PreTrainedModel.save_pretrained`]で指定されたディレクトリにファイルを保存しておきます: ```py >>> tokenizer.save_pretrained("./your/path/bigscience_t0") >>> model.save_pretrained("./your/path/bigscience_t0") ``` 3. オフラインにある時、[`PreTrainedModel.from_pretrained`]に指定したディレクトリからファイルをリロードします: ```py >>> tokenizer = AutoTokenizer.from_pretrained("./your/path/bigscience_t0") >>> model = AutoModel.from_pretrained("./your/path/bigscience_t0") ``` * プログラム的に[huggingface_hub](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub)ライブラリを用いて、ファイルをダウンロードする方法: 1. 仮想環境に`huggingface_hub`ライブラリをインストールします: ```bash python -m pip install huggingface_hub ``` 2. 
指定のパスにファイルをダウンロードするために、[`hf_hub_download`](https://huggingface.co/docs/hub/adding-a-library#download-files-from-the-hub)関数を使用します。例えば、以下のコマンドで、[T0](https://huggingface.co/bigscience/T0_3B)モデルの`config.json`ファイルを指定のパスにダウンロードできます: ```py >>> from huggingface_hub import hf_hub_download >>> hf_hub_download(repo_id="bigscience/T0_3B", filename="config.json", cache_dir="./your/path/bigscience_t0") ``` ファイルがダウンロードされ、ローカルにキャッシュされたら、そのローカルパスを指定してファイルをロードして使用します: ```py >>> from transformers import AutoConfig >>> config = AutoConfig.from_pretrained("./your/path/bigscience_t0/config.json") ``` <Tip> Hubに保存されているファイルをダウンロードする方法の詳細については、[How to download files from the Hub](https://huggingface.co/docs/hub/how-to-downstream)セクションを参照してください。 </Tip>
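When many files from one repository are needed offline, downloading them one by one with `hf_hub_download` becomes tedious. As an illustrative alternative not shown above, `snapshot_download` from the same `huggingface_hub` library fetches the whole repository in one call; the local cache path used here mirrors the example path from this page and is only illustrative.

```py
from huggingface_hub import snapshot_download
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Download every file of the repository ahead of time (run this while online)
local_dir = snapshot_download(repo_id="bigscience/T0_3B", cache_dir="./your/path/bigscience_t0")

# Later, offline, load directly from the returned local directory
tokenizer = AutoTokenizer.from_pretrained(local_dir)
model = AutoModelForSeq2SeqLM.from_pretrained(local_dir)
```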
transformers/docs/source/ja/installation.md/0
{ "file_path": "transformers/docs/source/ja/installation.md", "repo_id": "transformers", "token_count": 4805 }
35
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # AltCLIP ## 概要 AltCLIPモデルは、「[AltCLIP: Altering the Language Encoder in CLIP for Extended Language Capabilities](https://arxiv.org/abs/2211.06679v2)」という論文でZhongzhi Chen、Guang Liu、Bo-Wen Zhang、Fulong Ye、Qinghong Yang、Ledell Wuによって提案されました。AltCLIP(CLIPの言語エンコーダーの代替)は、様々な画像-テキストペアおよびテキスト-テキストペアでトレーニングされたニューラルネットワークです。CLIPのテキストエンコーダーを事前学習済みの多言語テキストエンコーダーXLM-Rに置き換えることで、ほぼ全てのタスクでCLIPに非常に近い性能を得られ、オリジナルのCLIPの能力を多言語理解などに拡張しました。 論文の要旨は以下の通りです: *この研究では、強力なバイリンガルマルチモーダル表現モデルを訓練するための概念的に単純で効果的な方法を提案します。OpenAIによってリリースされたマルチモーダル表現モデルCLIPから開始し、そのテキストエンコーダを事前学習済みの多言語テキストエンコーダXLM-Rに交換し、教師学習と対照学習からなる2段階のトレーニングスキーマを用いて言語と画像の表現を整合させました。幅広いタスクの評価を通じて、我々の方法を検証します。ImageNet-CN、Flicker30k-CN、COCO-CNを含む多くのタスクで新たな最先端の性能を達成しました。さらに、ほぼすべてのタスクでCLIPに非常に近い性能を得ており、これはCLIPのテキストエンコーダを変更するだけで、多言語理解などの拡張を実現できることを示唆しています。* このモデルは[jongjyh](https://huggingface.co/jongjyh)により提供されました。 ## 使用上のヒントと使用例 AltCLIPの使用方法はCLIPに非常に似ています。CLIPとの違いはテキストエンコーダーにあります。私たちはカジュアルアテンションではなく双方向アテンションを使用し、XLM-Rの[CLS]トークンをテキスト埋め込みを表すものとして取ることに留意してください。 AltCLIPはマルチモーダルな視覚言語モデルです。これは画像とテキストの類似度や、ゼロショット画像分類に使用できます。AltCLIPはViTのようなTransformerを使用して視覚的特徴を、双方向言語モデルを使用してテキスト特徴を取得します。テキストと視覚の両方の特徴は、同一の次元を持つ潜在空間に射影されます。射影された画像とテキスト特徴間のドット積が類似度スコアとして使用されます。 Transformerエンコーダーに画像を与えるには、各画像を固定サイズの重複しないパッチの系列に分割し、それらを線形に埋め込みます。画像全体を表現するための[CLS]トークンが追加されます。著者は絶対位置埋め込みも追加し、結果として得られるベクトルの系列を標準的なTransformerエンコーダーに供給します。[`CLIPImageProcessor`]を使用して、モデルのために画像のサイズ変更(または拡大縮小)と正規化を行うことができます。 [`AltCLIPProcessor`]は、テキストのエンコードと画像の前処理を両方行うために、[`CLIPImageProcessor`]と[`XLMRobertaTokenizer`]を単一のインスタンスにラップします。以下の例は、[`AltCLIPProcessor`]と[`AltCLIPModel`]を使用して画像-テキスト類似スコアを取得する方法を示しています。 ```python >>> from PIL import Image >>> import requests >>> from transformers import AltCLIPModel, AltCLIPProcessor >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AltCLIPProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ``` <Tip> このモデルは`CLIPModel`をベースにしており、オリジナルの[CLIP](clip)と同じように使用してください。 </Tip> ## AltCLIPConfig [[autodoc]] AltCLIPConfig - from_text_vision_configs ## AltCLIPTextConfig [[autodoc]] AltCLIPTextConfig ## AltCLIPVisionConfig [[autodoc]] AltCLIPVisionConfig ## AltCLIPProcessor [[autodoc]] AltCLIPProcessor ## AltCLIPModel [[autodoc]] AltCLIPModel - forward - get_text_features - get_image_features ## AltCLIPTextModel [[autodoc]] AltCLIPTextModel - forward ## AltCLIPVisionModel [[autodoc]] 
AltCLIPVisionModel - forward
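To complement the usage example above, the projected embeddings can also be pulled out directly through `get_text_features` and `get_image_features`, which are listed in the API reference. The normalization and cosine-similarity step below is an illustrative sketch of what the model does internally before applying its logit scale; `model` and `inputs` are the objects from the earlier example.

```python
import torch

with torch.no_grad():
    text_features = model.get_text_features(
        input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]
    )
    image_features = model.get_image_features(pixel_values=inputs["pixel_values"])

# L2-normalize, then compare with a dot product (cosine similarity)
text_features = text_features / text_features.norm(dim=-1, keepdim=True)
image_features = image_features / image_features.norm(dim=-1, keepdim=True)
similarity = image_features @ text_features.T
```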
transformers/docs/source/ja/model_doc/altclip.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/altclip.md", "repo_id": "transformers", "token_count": 2400 }
36
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Big Transfer (BiT) ## Overview BiT モデルは、Alexander Kolesnikov、Lucas Beyer、Xiaohua Zhai、Joan Puigcerver、Jessica Yung、Sylvain Gelly によって [Big Transfer (BiT): General Visual Representation Learning](https://arxiv.org/abs/1912.11370) で提案されました。ニール・ホールズビー。 BiT は、[ResNet](resnet) のようなアーキテクチャ (具体的には ResNetv2) の事前トレーニングをスケールアップするための簡単なレシピです。この方法により、転移学習が大幅に改善されます。 論文の要約は次のとおりです。 *事前トレーニングされた表現の転送により、サンプル効率が向上し、視覚用のディープ ニューラル ネットワークをトレーニングする際のハイパーパラメーター調整が簡素化されます。大規模な教師ありデータセットでの事前トレーニングと、ターゲット タスクでのモデルの微調整のパラダイムを再検討します。私たちは事前トレーニングをスケールアップし、Big Transfer (BiT) と呼ぶシンプルなレシピを提案します。いくつかの慎重に選択されたコンポーネントを組み合わせ、シンプルなヒューリスティックを使用して転送することにより、20 を超えるデータセットで優れたパフォーマンスを実現します。 BiT は、クラスごとに 1 つのサンプルから合計 100 万のサンプルまで、驚くほど広範囲のデータ領域にわたって良好にパフォーマンスを発揮します。 BiT は、ILSVRC-2012 で 87.5%、CIFAR-10 で 99.4%、19 タスクの Visual Task Adaptation Benchmark (VTAB) で 76.3% のトップ 1 精度を達成しました。小規模なデータセットでは、BiT は ILSVRC-2012 (クラスあたり 10 例) で 76.8%、CIFAR-10 (クラスあたり 10 例) で 97.0% を達成しました。高い転写性能を実現する主要成分を詳細に分析※。 ## Usage tips - BiT モデルは、アーキテクチャの点で ResNetv2 と同等ですが、次の点が異なります: 1) すべてのバッチ正規化層が [グループ正規化](https://arxiv.org/abs/1803.08494) に置き換えられます。 2) [重みの標準化](https://arxiv.org/abs/1903.10520) は畳み込み層に使用されます。著者らは、両方の組み合わせが大きなバッチサイズでのトレーニングに役立ち、重要な効果があることを示しています。 転移学習への影響。 このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。 元のコードは [こちら](https://github.com/google-research/big_transfer) にあります。 ## Resources BiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 <PipelineTag pipeline="image-classification"/> - [`BitForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## BitConfig [[autodoc]] BitConfig ## BitImageProcessor [[autodoc]] BitImageProcessor - preprocess ## BitModel [[autodoc]] BitModel - forward ## BitForImageClassification [[autodoc]] BitForImageClassification - forward
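As a quick, hedged illustration of the classes documented above, BiT checkpoints can be used through the image-classification pipeline. The `google/bit-50` checkpoint name and the sample image URL are given only as examples.

```py
from transformers import pipeline

# Any BiT image-classification checkpoint can be substituted here
classifier = pipeline("image-classification", model="google/bit-50")
predictions = classifier("http://images.cocodataset.org/val2017/000000039769.jpg")
print(predictions[0])
```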
transformers/docs/source/ja/model_doc/bit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/bit.md", "repo_id": "transformers", "token_count": 1858 }
37
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CLVP ## Overview CLVP (Contrastive Language-Voice Pretrained Transformer) モデルは、James Betker によって [Better speech synthesis through scaling](https://arxiv.org/abs/2305.07243) で提案されました。 論文の要約は次のとおりです。 *近年、画像生成の分野は自己回帰変換器と DDPM の応用によって革命を起こしています。これらのアプローチは、画像生成のプロセスを段階的な確率的プロセスとしてモデル化し、大量のコンピューティングとデータを活用して画像の分布を学習します。パフォーマンスを向上させるこの方法論は、画像に限定される必要はありません。この論文では、画像生成ドメインの進歩を音声合成に適用する方法について説明します。その結果、表現力豊かなマルチ音声テキスト読み上げシステムである TorToise が誕生しました。 このモデルは [Susnato Dhar](https://huggingface.co/susnato) によって提供されました。 元のコードは [ここ](https://github.com/neonbjb/tortoise-tts) にあります。 ## Usage tips 1. CLVP は Tortoise TTS モデルの不可欠な部分です。 2. CLVP を使用して、生成されたさまざまな音声候補を提供されたテキストと比較することができ、最良の音声トークンが拡散モデルに転送されます。 3. Tortoise の使用には、[`ClvpModelForConditionalGeneration.generate()`] メソッドの使用を強くお勧めします。 4. 16 kHz を期待する他のオーディオ モデルとは対照的に、CLVP モデルはオーディオが 22.05 kHz でサンプリングされることを期待していることに注意してください。 ## Brief Explanation: - [`ClvpTokenizer`] はテキスト入力をトークン化し、[`ClvpFeatureExtractor`] は目的のオーディオからログ メル スペクトログラムを抽出します。 - [`ClvpConditioningEncoder`] は、これらのテキスト トークンとオーディオ表現を取得し、テキストとオーディオに基づいて条件付けされた埋め込みに変換します。 - [`ClvpForCausalLM`] は、これらの埋め込みを使用して複数の音声候補を生成します。 - 各音声候補は音声エンコーダ ([`ClvpEncoder`]) を通過してベクトル表現に変換され、テキスト エンコーダ ([`ClvpEncoder`]) はテキスト トークンを同じ潜在空間に変換します。 - 最後に、各音声ベクトルをテキスト ベクトルと比較して、どの音声ベクトルがテキスト ベクトルに最も類似しているかを確認します。 - [`ClvpModelForConditionalGeneration.generate()`] は、上記のすべてのロジックを 1 つのメソッドに圧縮します。 例 : ```python >>> import datasets >>> from transformers import ClvpProcessor, ClvpModelForConditionalGeneration >>> # Define the Text and Load the Audio (We are taking an audio example from HuggingFace Hub using `datasets` library). >>> text = "This is an example text." >>> ds = datasets.load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") >>> ds = ds.cast_column("audio", datasets.Audio(sampling_rate=22050)) >>> sample = ds[0]["audio"] >>> # Define processor and model. >>> processor = ClvpProcessor.from_pretrained("susnato/clvp_dev") >>> model = ClvpModelForConditionalGeneration.from_pretrained("susnato/clvp_dev") >>> # Generate processor output and model output. 
>>> processor_output = processor(raw_speech=sample["array"], sampling_rate=sample["sampling_rate"], text=text, return_tensors="pt") >>> generated_output = model.generate(**processor_output) ``` ## ClvpConfig [[autodoc]] ClvpConfig - from_sub_model_configs ## ClvpEncoderConfig [[autodoc]] ClvpEncoderConfig ## ClvpDecoderConfig [[autodoc]] ClvpDecoderConfig ## ClvpTokenizer [[autodoc]] ClvpTokenizer - save_vocabulary ## ClvpFeatureExtractor [[autodoc]] ClvpFeatureExtractor - __call__ ## ClvpProcessor [[autodoc]] ClvpProcessor - __call__ - decode - batch_decode ## ClvpModelForConditionalGeneration [[autodoc]] ClvpModelForConditionalGeneration - forward - generate - get_text_features - get_speech_features ## ClvpForCausalLM [[autodoc]] ClvpForCausalLM ## ClvpModel [[autodoc]] ClvpModel ## ClvpEncoder [[autodoc]] ClvpEncoder ## ClvpDecoder [[autodoc]] ClvpDecoder
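Usage tip 4 above notes that CLVP expects audio sampled at 22.05 kHz. When the source audio does not come from 🤗 Datasets (where `cast_column` handles resampling, as in the example), it can be resampled before calling the processor. This is a generic sketch under the assumption that `waveform` (a 1-D float array) and `orig_sr` were loaded elsewhere, for example with `librosa` or `torchaudio`.

```python
import torch
import torchaudio.functional as F

target_sr = 22050  # the sampling rate CLVP expects
waveform_22k = F.resample(torch.as_tensor(waveform), orig_freq=orig_sr, new_freq=target_sr)

processor_output = processor(
    raw_speech=waveform_22k.numpy(), sampling_rate=target_sr, text=text, return_tensors="pt"
)
generated_output = model.generate(**processor_output)
```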
transformers/docs/source/ja/model_doc/clvp.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/clvp.md", "repo_id": "transformers", "token_count": 2038 }
38
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # DeiT ## Overview DeiT モデルは、Hugo Touvron、Matthieu Cord、Matthijs Douze、Francisco Massa、Alexandre Sablayrolles, Hervé Jégou.によって [Training data-efficient image Transformers & distillation through attention](https://arxiv.org/abs/2012.12877) で提案されました。 サブレイロール、エルヴェ・ジェグー。 [Dosovitskiy et al., 2020](https://arxiv.org/abs/2010.11929) で紹介された [Vision Transformer (ViT)](vit) は、既存の畳み込みニューラルと同等、またはそれを上回るパフォーマンスを発揮できることを示しました。 Transformer エンコーダ (BERT のような) を使用したネットワーク。ただし、その論文で紹介された ViT モデルには、次のトレーニングが必要でした。 外部データを使用して、数週間にわたる高価なインフラストラクチャ。 DeiT (データ効率の高い画像変換器) はさらに優れています 画像分類用に効率的にトレーニングされたトランスフォーマーにより、必要なデータとコンピューティング リソースがはるかに少なくなります。 オリジナルの ViT モデルとの比較。 論文の要約は次のとおりです。 *最近、純粋に注意に基づくニューラル ネットワークが、画像などの画像理解タスクに対処できることが示されました。 分類。ただし、これらのビジュアル トランスフォーマーは、 インフラストラクチャが高価であるため、その採用が制限されています。この作業では、コンボリューションフリーの競争力のあるゲームを作成します。 Imagenet のみでトレーニングしてトランスフォーマーを作成します。 1 台のコンピューターで 3 日以内にトレーニングを行います。私たちの基準となるビジョン トランス (86M パラメータ) は、外部なしで ImageNet 上で 83.1% (単一クロップ評価) のトップ 1 の精度を達成します。 データ。さらに重要なのは、トランスフォーマーに特有の教師と生徒の戦略を導入することです。蒸留に依存している 学生が注意を払って教師から学ぶことを保証するトークン。私たちはこのトークンベースに興味を示します 特に convnet を教師として使用する場合。これにより、convnet と競合する結果を報告できるようになります。 Imagenet (最大 85.2% の精度が得られます) と他のタスクに転送するときの両方で。私たちはコードを共有し、 モデル。* このモデルは、[nielsr](https://huggingface.co/nielsr) によって提供されました。このモデルの TensorFlow バージョンは、[amyeroberts](https://huggingface.co/amyeroberts) によって追加されました。 ## Usage tips - ViT と比較して、DeiT モデルはいわゆる蒸留トークンを使用して教師から効果的に学習します (これは、 DeiT 論文は、ResNet のようなモデルです)。蒸留トークンは、バックプロパゲーションを通じて、と対話することによって学習されます。 セルフアテンション層を介したクラス ([CLS]) とパッチ トークン。 - 抽出されたモデルを微調整するには 2 つの方法があります。(1) 上部に予測ヘッドを配置するだけの古典的な方法。 クラス トークンの最終的な非表示状態を抽出し、蒸留シグナルを使用しない、または (2) 両方の 予測ヘッドはクラス トークンの上と蒸留トークンの上にあります。その場合、[CLS] 予測は head は、head の予測とグラウンド トゥルース ラベル間の通常のクロスエントロピーを使用してトレーニングされます。 蒸留予測ヘッドは、硬蒸留 (予測と予測の間のクロスエントロピー) を使用してトレーニングされます。 蒸留ヘッドと教師が予測したラベル)。推論時に、平均予測を取得します。 最終的な予測として両頭の間で。 (2) は「蒸留による微調整」とも呼ばれます。 下流のデータセットですでに微調整されている教師。モデル的には (1) に相当します。 [`DeiTForImageClassification`] と (2) に対応します。 [`DeiTForImageClassificationWithTeacher`]。 - 著者らは (2) についてもソフト蒸留を試みたことに注意してください (この場合、蒸留予測ヘッドは 教師のソフトマックス出力に一致するように KL ダイバージェンスを使用してトレーニングしました)が、ハード蒸留が最良の結果をもたらしました。 - リリースされたすべてのチェックポイントは、ImageNet-1k のみで事前トレーニングおよび微調整されました。外部データは使用されませんでした。これは JFT-300M データセット/Imagenet-21k などの外部データを使用した元の ViT モデルとは対照的です。 事前トレーニング。 - DeiT の作者は、より効率的にトレーニングされた ViT モデルもリリースしました。これは、直接プラグインできます。 [`ViTModel`] または [`ViTForImageClassification`]。データなどのテクニック はるかに大規模なデータセットでのトレーニングをシミュレートするために、拡張、最適化、正則化が使用されました。 (ただし、事前トレーニングには ImageNet-1k のみを使用します)。 4 つのバリエーション (3 つの異なるサイズ) が利用可能です。 *facebook/deit-tiny-patch16-224*、*facebook/deit-small-patch16-224*、*facebook/deit-base-patch16-224* および *facebook/deit-base-patch16-384*。以下を行うには [`DeiTImageProcessor`] を使用する必要があることに注意してください。 モデル用の画像を準備します。 ## Resources DeiT を始めるのに役立つ公式 Hugging Face およびコミュニティ (🌎 で示されている) リソースのリスト。 
<PipelineTag pipeline="image-classification"/> - [`DeiTForImageClassification`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-classification) および [ノートブック](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/image_classification.ipynb)。 - 参照: [画像分類タスク ガイド](../tasks/image_classification) それに加えて: - [`DeiTForMaskedImageModeling`] は、この [サンプル スクリプト](https://github.com/huggingface/transformers/tree/main/examples/pytorch/image-pretraining) でサポートされています。 ここに含めるリソースの送信に興味がある場合は、お気軽にプル リクエストを開いてください。審査させていただきます。リソースは、既存のリソースを複製するのではなく、何か新しいものを示すことが理想的です。 ## DeiTConfig [[autodoc]] DeiTConfig ## DeiTFeatureExtractor [[autodoc]] DeiTFeatureExtractor - __call__ ## DeiTImageProcessor [[autodoc]] DeiTImageProcessor - preprocess <frameworkcontent> <pt> ## DeiTModel [[autodoc]] DeiTModel - forward ## DeiTForMaskedImageModeling [[autodoc]] DeiTForMaskedImageModeling - forward ## DeiTForImageClassification [[autodoc]] DeiTForImageClassification - forward ## DeiTForImageClassificationWithTeacher [[autodoc]] DeiTForImageClassificationWithTeacher - forward </pt> <tf> ## TFDeiTModel [[autodoc]] TFDeiTModel - call ## TFDeiTForMaskedImageModeling [[autodoc]] TFDeiTForMaskedImageModeling - call ## TFDeiTForImageClassification [[autodoc]] TFDeiTForImageClassification - call ## TFDeiTForImageClassificationWithTeacher [[autodoc]] TFDeiTForImageClassificationWithTeacher - call </tf> </frameworkcontent>
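As a complement to the class listing above, the following is a hedged inference sketch for `DeiTForImageClassificationWithTeacher`, which averages the class-token and distillation-token heads at inference time as described in the usage tips. The distilled checkpoint name and image URL are illustrative.

```py
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, DeiTForImageClassificationWithTeacher

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = AutoImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])
```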
transformers/docs/source/ja/model_doc/deit.md/0
{ "file_path": "transformers/docs/source/ja/model_doc/deit.md", "repo_id": "transformers", "token_count": 3682 }
39
<!-- Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ このファイルはMarkdown形式ですが、Hugging Faceのドキュメントビルダー向けに特定の構文を含んでいるため、 通常のMarkdownビューアーで正しく表示されないことに注意してください。 --> # Quick tour [[open-in-colab]] 🤗 Transformersを使い始めましょう! 開発者であろうと、日常的なユーザーであろうと、このクイックツアーは 初めて始めるのを支援し、[`pipeline`]を使った推論方法、[AutoClass](./model_doc/auto)で事前学習済みモデルとプリプロセッサをロードする方法、 そしてPyTorchまたはTensorFlowで素早くモデルをトレーニングする方法を示します。 初心者の場合、ここで紹介されたコンセプトの詳細な説明を提供する チュートリアルまたは[コース](https://huggingface.co/course/chapter1/1)を次に参照することをお勧めします。 始める前に、必要なライブラリがすべてインストールされていることを確認してください: ```bash !pip install transformers datasets evaluate accelerate ``` あなたはまた、好きな機械学習フレームワークをインストールする必要があります: <frameworkcontent> <pt> ```bash pip install torch ``` </pt> <tf> ```bash pip install tensorflow ``` </tf> </frameworkcontent> ## Pipeline <Youtube id="tiZFewofSLM"/> [`pipeline`] は、事前学習済みモデルを推論に最も簡単で高速な方法です。 [`pipeline`] を使用することで、さまざまなモダリティにわたる多くのタスクに対して即座に使用できます。 いくつかのタスクは以下の表に示されています: <Tip> 使用可能なタスクの完全な一覧については、[pipeline API リファレンス](./main_classes/pipelines)を確認してください。 </Tip> | **タスク** | **説明** | **モダリティ** | **パイプライン識別子** | |------------------------------|--------------------------------------------------------------------------------------------------------------|-----------------|-----------------------------------------------| | テキスト分類 | テキストのシーケンスにラベルを割り当てる | NLP | pipeline(task="sentiment-analysis") | | テキスト生成 | プロンプトを指定してテキストを生成する | NLP | pipeline(task="text-generation") | | 要約 | テキストまたはドキュメントの要約を生成する | NLP | pipeline(task="summarization") | | 画像分類 | 画像にラベルを割り当てる | コンピュータビジョン | pipeline(task="image-classification") | | 画像セグメンテーション | 画像の各個別のピクセルにラベルを割り当てる(セマンティック、パノプティック、およびインスタンスセグメンテーションをサポート) | コンピュータビジョン | pipeline(task="image-segmentation") | | オブジェクト検出 | 画像内のオブジェクトの境界ボックスとクラスを予測する | コンピュータビジョン | pipeline(task="object-detection") | | オーディオ分類 | オーディオデータにラベルを割り当てる | オーディオ | pipeline(task="audio-classification") | | 自動音声認識 | 音声をテキストに変換する | オーディオ | pipeline(task="automatic-speech-recognition") | | ビジュアルクエスチョン応答 | 画像と質問が与えられた場合に、画像に関する質問に回答する | マルチモーダル | pipeline(task="vqa") | | ドキュメントクエスチョン応答 | ドキュメントと質問が与えられた場合に、ドキュメントに関する質問に回答する | マルチモーダル | pipeline(task="document-question-answering") | | 画像キャプショニング | 与えられた画像にキャプションを生成する | マルチモーダル | pipeline(task="image-to-text") | まず、[`pipeline`] のインスタンスを作成し、使用したいタスクを指定します。 このガイドでは、センチメント分析のために [`pipeline`] を使用する例を示します: ```python >>> from transformers import pipeline >>> classifier = pipeline("sentiment-analysis") ``` [`pipeline`]は、感情分析のためのデフォルトの[事前学習済みモデル](https://huggingface.co/distilbert/distilbert-base-uncased-finetuned-sst-2-english)とトークナイザをダウンロードしてキャッシュし、使用できるようになります。 これで、`classifier`を対象のテキストに使用できます: ```python >>> classifier("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") [{'label': 'POSITIVE', 'score': 0.9998}] ``` 複数の入力がある場合は、[`pipeline`]に入力をリストとして渡して、辞書のリストを返します: ```py >>> results = classifier(["🤗 Transformersライブラリをご紹介できて非常に嬉しいです。", "嫌いにならないでほしいです。"]) >>> for result in results: ... 
print(f"label: {result['label']}, スコア: {round(result['score'], 4)}") label: POSITIVE, スコア: 0.9998 label: NEGATIVE, スコア: 0.5309 ``` [`pipeline`]は、任意のタスクに対してデータセット全体を繰り返し処理することもできます。この例では、自動音声認識をタスクとして選びましょう: ```python >>> import torch >>> from transformers import pipeline >>> speech_recognizer = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h") ``` オーディオデータセットをロードします(詳細については🤗 Datasets [クイックスタート](https://huggingface.co/docs/datasets/quickstart#audio)を参照してください)。 たとえば、[MInDS-14](https://huggingface.co/datasets/PolyAI/minds14)データセットをロードします: ```python >>> from datasets import load_dataset, Audio >>> dataset = load_dataset("PolyAI/minds14", name="en-US", split="train") # doctest: +IGNORE_RESULT ``` データセットのサンプリングレートが[`facebook/wav2vec2-base-960h`](https://huggingface.co/facebook/wav2vec2-base-960h)がトレーニングされたサンプリングレートと一致することを確認してください: ```py >>> dataset = dataset.cast_column("audio", Audio(sampling_rate=speech_recognizer.feature_extractor.sampling_rate)) ``` "audio"列を呼び出すと、オーディオファイルは自動的にロードされ、リサンプリングされます。最初の4つのサンプルから生の波形配列を抽出し、それをパイプラインにリストとして渡します。 ```py >>> result = speech_recognizer(dataset[:4]["audio"]) >>> print([d["text"] for d in result]) ['I WOULD LIKE TO SET UP A JOINT ACCOUNT WITH MY PARTNER HOW DO I PROCEED WITH DOING THAT', "FONDERING HOW I'D SET UP A JOIN TO HELL T WITH MY WIFE AND WHERE THE AP MIGHT BE", "I I'D LIKE TOY SET UP A JOINT ACCOUNT WITH MY PARTNER I'M NOT SEEING THE OPTION TO DO IT ON THE APSO I CALLED IN TO GET SOME HELP CAN I JUST DO IT OVER THE PHONE WITH YOU AND GIVE YOU THE INFORMATION OR SHOULD I DO IT IN THE AP AN I'M MISSING SOMETHING UQUETTE HAD PREFERRED TO JUST DO IT OVER THE PHONE OF POSSIBLE THINGS", 'HOW DO I FURN A JOINA COUT'] ``` 大規模なデータセットで、入力が大きい場合(音声や画像など)、すべての入力をメモリに読み込む代わりに、リストではなくジェネレータを渡すことがお勧めです。詳細については[パイプラインAPIリファレンス](./main_classes/pipelines)を参照してください。 ### Use another model and tokenizer in the pipeline [`pipeline`]は[Hub](https://huggingface.co/models)からの任意のモデルを収容でき、他のユースケースに[`pipeline`]を適応させることが容易です。たとえば、フランス語のテキストを処理できるモデルが必要な場合、Hubのタグを使用して適切なモデルをフィルタリングできます。トップのフィルタリングされた結果は、フランス語のテキストに使用できる感情分析用に調整された多言語の[BERTモデル](https://huggingface.co/nlptown/bert-base-multilingual-uncased-sentiment)を返します: ```py >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" ``` <frameworkcontent> <pt> [`AutoModelForSequenceClassification`]と[`AutoTokenizer`]を使用して事前学習済みモデルとそれに関連するトークナイザをロードします(次のセクションで`AutoClass`について詳しく説明します): ```python >>> from transformers import AutoTokenizer, AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </pt> <tf> 以下のコードは、[`TFAutoModelForSequenceClassification`]および[`AutoTokenizer`]を使用して、事前学習済みモデルとその関連するトークナイザをロードする方法を示しています(`TFAutoClass`については次のセクションで詳しく説明します): ```python >>> from transformers import AutoTokenizer, TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained(model_name) >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` </tf> </frameworkcontent> 指定したモデルとトークナイザを[`pipeline`]に設定し、今度はフランス語のテキストに`classifier`を適用できます: ```py >>> classifier = pipeline("sentiment-analysis", model=model, tokenizer=tokenizer) >>> classifier("Nous sommes très heureux de vous présenter la bibliothèque 🤗 Transformers.") [{'label': '5 stars', 'score': 0.7273}] ``` もし、あなたのユースケースに適したモデルが見つからない場合、事前学習済みモデルをあなたのデータでファインチューニングする必要があります。 ファインチューニングの方法については、[ファインチューニングのチュートリアル](./training)をご覧ください。 
最後に、ファインチューニングした事前学習済みモデルを共有し、コミュニティと共有ハブで共有することを検討してください。これにより、機械学習を民主化する手助けができます! 🤗 ## AutoClass <Youtube id="AhChOFRegn4"/> [`AutoModelForSequenceClassification`] および [`AutoTokenizer`] クラスは、上記で使用した [`pipeline`] を駆動するために協力して動作します。 [AutoClass](./model_doc/auto) は、事前学習済みモデルのアーキテクチャをその名前またはパスから自動的に取得するショートカットです。 適切な `AutoClass` を選択し、それに関連する前処理クラスを選択するだけで済みます。 前のセクションからの例に戻り、`AutoClass` を使用して [`pipeline`] の結果を再現する方法を見てみましょう。 ### AutoTokenizer トークナイザはテキストをモデルの入力として使用できる数値の配列に前処理する役割を果たします。 トークナイゼーションプロセスには、単語をどのように分割するかや、単語をどのレベルで分割するかといった多くのルールがあります (トークナイゼーションについての詳細は [トークナイザサマリー](./tokenizer_summary) をご覧ください)。 最も重要なことは、モデルが事前学習済みになったときと同じトークナイゼーションルールを使用するために、同じモデル名でトークナイザをインスタンス化する必要があることです。 [`AutoTokenizer`] を使用してトークナイザをロードします: ```python >>> from transformers import AutoTokenizer >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tokenizer = AutoTokenizer.from_pretrained(model_name) ``` Pass your text to the tokenizer: ```python >>> encoding = tokenizer("私たちは🤗 Transformersライブラリをお見せできてとても嬉しいです。") >>> print(encoding) {'input_ids': [101, 11312, 10320, 12495, 19308, 10114, 11391, 10855, 10103, 100, 58263, 13299, 119, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` トークナイザは、次の情報を含む辞書を返します: - [input_ids](./glossary#input-ids): トークンの数値表現。 - [attention_mask](.glossary#attention-mask): どのトークンにアテンションを向けるかを示します。 トークナイザはまた、入力のリストを受け入れ、一様な長さのバッチを返すためにテキストをパディングおよび切り詰めることができます。 <frameworkcontent> <pt> ```py >>> pt_batch = tokenizer( ... ["🤗 Transformersライブラリをお見せできて非常に嬉しいです。", "嫌いではないことを願っています。"], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="pt", ... ) ``` </pt> <tf> ```py >>> tf_batch = tokenizer( ... ["We are very happy to show you the 🤗 Transformers library.", "We hope you don't hate it."], ... padding=True, ... truncation=True, ... max_length=512, ... return_tensors="tf", ... 
) ``` </tf> </frameworkcontent> <Tip> [前処理](./preprocessing)チュートリアルをご覧いただき、トークナイゼーションの詳細や、[`AutoImageProcessor`]、[`AutoFeatureExtractor`]、[`AutoProcessor`]を使用して画像、オーディオ、およびマルチモーダル入力を前処理する方法について詳しく説明されているページもご覧ください。 </Tip> ### AutoModel <frameworkcontent> <pt> 🤗 Transformersは事前学習済みインスタンスを簡単に統一的にロードする方法を提供します。 これは、[`AutoTokenizer`]をロードするのと同じように[`AutoModel`]をロードできることを意味します。 タスクに適した[`AutoModel`]を選択する以外の違いはありません。 テキスト(またはシーケンス)分類の場合、[`AutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import AutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> pt_model = AutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> [`AutoModel`]クラスでサポートされているタスクに関する詳細については、[タスクの概要](./task_summary)を参照してください。 </Tip> 今、前処理済みのバッチを直接モデルに渡します。辞書を展開するだけで、`**`を追加する必要があります: ```python >>> pt_outputs = pt_model(**pt_batch) ``` モデルは、`logits`属性に最終的なアクティベーションを出力します。 `logits`にsoftmax関数を適用して確率を取得します: ```py >>> from torch import nn >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1) >>> print(pt_predictions) tensor([[0.0021, 0.0018, 0.0115, 0.2121, 0.7725], [0.2084, 0.1826, 0.1969, 0.1755, 0.2365]], grad_fn=<SoftmaxBackward0>) ``` </pt> <tf> 🤗 Transformersは事前学習済みインスタンスをロードするためのシンプルで統一された方法を提供します。 これは、[`TFAutoModel`]を[`AutoTokenizer`]をロードするのと同じようにロードできることを意味します。 唯一の違いは、タスクに適した[`TFAutoModel`]を選択することです。 テキスト(またはシーケンス)分類の場合、[`TFAutoModelForSequenceClassification`]をロードする必要があります: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model_name = "nlptown/bert-base-multilingual-uncased-sentiment" >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(model_name) ``` <Tip> 詳細については、[`AutoModel`]クラスでサポートされているタスクに関する情報は、[タスクの概要](./task_summary)を参照してください。 </Tip> 次に、前処理済みのバッチを直接モデルに渡します。テンソルをそのまま渡すことができます: ```python >>> tf_outputs = tf_model(tf_batch) ``` モデルは`logits`属性に最終的なアクティベーションを出力します。`logits`にソフトマックス関数を適用して確率を取得します: ```python >>> import tensorflow as tf >>> tf_predictions = tf.nn.softmax(tf_outputs.logits, axis=-1) >>> tf_predictions # doctest: +IGNORE_RESULT ``` </tf> </frameworkcontent> <Tip> 🤗 Transformersのすべてのモデル(PyTorchまたはTensorFlow)は、最終的な活性化関数(softmaxなど)*前*のテンソルを出力します。 最終的な活性化関数は、しばしば損失と結合されているためです。モデルの出力は特別なデータクラスであり、その属性はIDEで自動補完されます。 モデルの出力は、タプルまたは辞書のように動作します(整数、スライス、または文字列でインデックスを付けることができます)。 この場合、Noneである属性は無視されます。 </Tip> ### Save a Model <frameworkcontent> <pt> モデルをファインチューニングしたら、[`PreTrainedModel.save_pretrained`]を使用してトークナイザと共に保存できます: ```py >>> pt_save_directory = "./pt_save_pretrained" >>> tokenizer.save_pretrained(pt_save_directory) # doctest: +IGNORE_RESULT >>> pt_model.save_pretrained(pt_save_directory) ``` 再びモデルを使用する準備ができたら、[`PreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> pt_model = AutoModelForSequenceClassification.from_pretrained("./pt_save_pretrained") ``` </pt> <tf> モデルをファインチューニングしたら、そのトークナイザを使用してモデルを保存できます。[`TFPreTrainedModel.save_pretrained`]を使用します: ```py >>> tf_save_directory = "./tf_save_pretrained" >>> tokenizer.save_pretrained(tf_save_directory) # doctest: +IGNORE_RESULT >>> tf_model.save_pretrained(tf_save_directory) ``` モデルを再度使用する準備ができたら、[`TFPreTrainedModel.from_pretrained`]を使用して再度ロードします: ```py >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained("./tf_save_pretrained") ``` </tf> </frameworkcontent> 🤗 Transformersの特に素晴らしい機能の一つは、モデルを保存し、それをPyTorchモデルまたはTensorFlowモデルとして再ロードできることです。 `from_pt`または`from_tf`パラメータを使用してモデルをフレームワーク間で変換できます: <frameworkcontent> <pt> ```py >>> from transformers import AutoModel >>> tokenizer = 
AutoTokenizer.from_pretrained(pt_save_directory) >>> pt_model = AutoModelForSequenceClassification.from_pretrained(pt_save_directory, from_pt=True) ``` </pt> <tf> ```py >>> from transformers import TFAutoModel >>> tokenizer = AutoTokenizer.from_pretrained(tf_save_directory) >>> tf_model = TFAutoModelForSequenceClassification.from_pretrained(tf_save_directory, from_tf=True) ``` </tf> </frameworkcontent> ## Custom model builds モデルを構築方法を変更するには、モデルの設定クラスを変更できます。設定はモデルの属性を指定します。例えば、隠れ層の数やアテンションヘッドの数などがこれに含まれます。カスタム設定クラスからモデルを初期化する際には、ゼロから始めます。モデルの属性はランダムに初期化され、有意義な結果を得るためにモデルをトレーニングする必要があります。 最初に[`AutoConfig`]をインポートし、変更したい事前学習済みモデルをロードします。[`AutoConfig.from_pretrained`]内で、変更したい属性(例:アテンションヘッドの数)を指定できます: ```python >>> from transformers import AutoConfig >>> my_config = AutoConfig.from_pretrained("distilbert/distilbert-base-uncased", n_heads=12) ``` <frameworkcontent> <pt> [`AutoModel.from_config`]を使用してカスタム設定からモデルを作成します: ```python >>> from transformers import AutoModel >>> my_model = AutoModel.from_config(my_config) ``` </pt> <tf> カスタム構成からモデルを作成するには、[`TFAutoModel.from_config`]を使用します: ```py >>> from transformers import TFAutoModel >>> my_model = TFAutoModel.from_config(my_config) ``` </tf> </frameworkcontent> [カスタムアーキテクチャを作成](./create_a_model)ガイドを参照して、カスタム構成の詳細情報を確認してください。 ## Trainer - PyTorch向けの最適化されたトレーニングループ すべてのモデルは標準の[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)であるため、通常のトレーニングループで使用できます。 独自のトレーニングループを作成できますが、🤗 TransformersはPyTorch向けに[`Trainer`]クラスを提供しており、基本的なトレーニングループに加えて、 分散トレーニング、混合精度などの機能の追加を行っています。 タスクに応じて、通常は[`Trainer`]に以下のパラメータを渡します: 1. [`PreTrainedModel`]または[`torch.nn.Module`](https://pytorch.org/docs/stable/nn.html#torch.nn.Module)から始めます: ```py >>> from transformers import AutoModelForSequenceClassification >>> model = AutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. [`TrainingArguments`]には、変更できるモデルのハイパーパラメータが含まれており、学習率、バッチサイズ、トレーニングエポック数などが変更できます。指定しない場合、デフォルト値が使用されます: ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="path/to/save/folder/", ... learning_rate=2e-5, ... per_device_train_batch_size=8, ... per_device_eval_batch_size=8, ... num_train_epochs=2, ... ) ``` 3. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 4. データセットをロードする: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("rotten_tomatoes") # doctest: +IGNORE_RESULT ``` 5. データセットをトークン化するための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) ``` その後、[`~datasets.Dataset.map`]を使用してデータセット全体に適用します: ```python >>> dataset = dataset.map(tokenize_dataset, batched=True) ``` 6. データセットからの例のバッチを作成するための [`DataCollatorWithPadding`]: ```py >>> from transformers import DataCollatorWithPadding >>> data_collator = DataCollatorWithPadding(tokenizer=tokenizer) ``` 次に、これらのクラスを[`Trainer`]にまとめます: ```python >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... train_dataset=dataset["train"], ... eval_dataset=dataset["test"], ... processing_class=tokenizer, ... data_collator=data_collator, ... 
) # doctest: +SKIP ``` 訓練を開始する準備ができたら、[`~Trainer.train`]を呼び出してトレーニングを開始します: ```py >>> trainer.train() # doctest: +SKIP ``` <Tip> 翻訳や要約など、シーケンス間モデルを使用するタスクには、代わりに[`Seq2SeqTrainer`]と[`Seq2SeqTrainingArguments`]クラスを使用してください。 </Tip> [`Trainer`]内のメソッドをサブクラス化することで、トレーニングループの動作をカスタマイズできます。これにより、損失関数、オプティマイザ、スケジューラなどの機能をカスタマイズできます。サブクラス化できるメソッドの一覧については、[`Trainer`]リファレンスをご覧ください。 トレーニングループをカスタマイズする別の方法は、[Callbacks](./main_classes/callback)を使用することです。コールバックを使用して他のライブラリと統合し、トレーニングループを監視して進捗状況を報告したり、トレーニングを早期に停止したりできます。コールバックはトレーニングループ自体には何も変更を加えません。損失関数などのカスタマイズを行う場合は、[`Trainer`]をサブクラス化する必要があります。 ## Train with TensorFlow すべてのモデルは標準の[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)であるため、[Keras](https://keras.io/) APIを使用してTensorFlowでトレーニングできます。 🤗 Transformersは、データセットを`tf.data.Dataset`として簡単にロードできるようにする[`~TFPreTrainedModel.prepare_tf_dataset`]メソッドを提供しており、Kerasの[`compile`](https://keras.io/api/models/model_training_apis/#compile-method)および[`fit`](https://keras.io/api/models/model_training_apis/#fit-method)メソッドを使用してトレーニングをすぐに開始できます。 1. [`TFPreTrainedModel`]または[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model)から始めます: ```py >>> from transformers import TFAutoModelForSequenceClassification >>> model = TFAutoModelForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 2. トークナイザ、画像プロセッサ、特徴量抽出器、またはプロセッサのような前処理クラスをロードします: ```py >>> from transformers import AutoTokenizer >>> tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 3. データセットをトークナイズするための関数を作成します: ```python >>> def tokenize_dataset(dataset): ... return tokenizer(dataset["text"]) # doctest: +SKIP ``` 4. [`~datasets.Dataset.map`]を使用してデータセット全体にトークナイザを適用し、データセットとトークナイザを[`~TFPreTrainedModel.prepare_tf_dataset`]に渡します。バッチサイズを変更し、データセットをシャッフルすることもできます。 ```python >>> dataset = dataset.map(tokenize_dataset) # doctest: +SKIP >>> tf_dataset = model.prepare_tf_dataset( ... dataset["train"], batch_size=16, shuffle=True, tokenizer=tokenizer ... ) # doctest: +SKIP ``` 5. 準備ができたら、`compile`と`fit`を呼び出してトレーニングを開始できます。 Transformersモデルはすべてデフォルトのタスクに関連する損失関数を持っているため、指定しない限り、損失関数を指定する必要はありません。 ```python >>> from tensorflow.keras.optimizers import Adam >>> model.compile(optimizer=Adam(3e-5)) # 損失関数の引数は不要です! >>> model.fit(tf ``` ## What's next? 🤗 Transformersのクイックツアーを完了したら、ガイドをチェックして、カスタムモデルの作成、タスクのためのファインチューニング、スクリプトを使用したモデルのトレーニングなど、より具体的なことを学ぶことができます。🤗 Transformersのコアコンセプトについてもっと詳しく知りたい場合は、コンセプチュアルガイドを読んでみてください!
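As a compact recap of the TensorFlow steps above, the compile-and-fit stage looks like the sketch below; it assumes the `model` and the `tf_dataset` built with `prepare_tf_dataset` in the previous steps.

```py
from tensorflow.keras.optimizers import Adam

# No loss argument is needed: Transformers models supply a default task loss
model.compile(optimizer=Adam(3e-5))
model.fit(tf_dataset)
```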
transformers/docs/source/ja/quicktour.md/0
{ "file_path": "transformers/docs/source/ja/quicktour.md", "repo_id": "transformers", "token_count": 13067 }
40
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Object detection [[open-in-colab]] オブジェクト検出は、画像内のインスタンス (人間、建物、車など) を検出するコンピューター ビジョン タスクです。物体検出モデルは画像を入力および出力として受け取ります 検出されたオブジェクトの境界ボックスと関連するラベルの座標。画像には複数のオブジェクトを含めることができます。 それぞれに独自の境界ボックスとラベルがあり (例: 車と建物を持つことができます)、各オブジェクトは 画像のさまざまな部分に存在する必要があります (たとえば、画像には複数の車が含まれている可能性があります)。 このタスクは、歩行者、道路標識、信号機などを検出するために自動運転で一般的に使用されます。 他のアプリケーションには、画像内のオブジェクトのカウント、画像検索などが含まれます。 このガイドでは、次の方法を学習します。 1. Finetune [DETR](https://huggingface.co/docs/transformers/model_doc/detr)、畳み込みアルゴリズムを組み合わせたモデル [CPPE-5](https://huggingface.co/datasets/cppe-5) 上のエンコーダー/デコーダー トランスフォーマーを備えたバックボーン データセット。 2. 微調整したモデルを推論に使用します。 <Tip> このタスクと互換性のあるすべてのアーキテクチャとチェックポイントを確認するには、[タスクページ](https://huggingface.co/tasks/object-detection) を確認することをお勧めします。 </Tip> 始める前に、必要なライブラリがすべてインストールされていることを確認してください。 ```bash pip install -q datasets transformers evaluate timm albumentations ``` 🤗 データセットを使用して Hugging Face Hub からデータセットをロードし、🤗 トランスフォーマーを使用してモデルをトレーニングします。 データを増強するための`albumentations`。 `timm` は現在、DETR モデルの畳み込みバックボーンをロードするために必要です。 モデルをコミュニティと共有することをお勧めします。 Hugging Face アカウントにログインして、ハブにアップロードします。 プロンプトが表示されたら、トークンを入力してログインします。 ```py >>> from huggingface_hub import notebook_login >>> notebook_login() ``` ## Load the CPPE-5 dataset [CPPE-5 データセット](https://huggingface.co/datasets/cppe-5) には、次の画像が含まれています。 新型コロナウイルス感染症のパンデミックにおける医療用個人保護具 (PPE) を識別する注釈。 データセットをロードすることから始めます。 ```py >>> from datasets import load_dataset >>> cppe5 = load_dataset("cppe-5") >>> cppe5 DatasetDict({ train: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 1000 }) test: Dataset({ features: ['image_id', 'image', 'width', 'height', 'objects'], num_rows: 29 }) }) ``` このデータセットには、1000 枚の画像を含むトレーニング セットと 29 枚の画像を含むテスト セットがすでに付属していることがわかります。 データに慣れるために、例がどのようなものかを調べてください。 ```py >>> cppe5["train"][0] {'image_id': 15, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7F9EC9E77C10>, 'width': 943, 'height': 663, 'objects': {'id': [114, 115, 116, 117], 'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0]}} ``` データセット内の例には次のフィールドがあります。 - `image_id`: サンプルの画像ID - `image`: 画像を含む `PIL.Image.Image` オブジェクト - `width`: 画像の幅 - `height`: 画像の高さ - `objects`: 画像内のオブジェクトの境界ボックスのメタデータを含む辞書: - `id`: アノテーションID - `area`: 境界ボックスの領域 - `bbox`: オブジェクトの境界ボックス ([COCO 形式](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) ) - `category`: オブジェクトのカテゴリー。可能な値には、`Coverall (0)`、`Face_Shield (1)`、`Gloves (2)`、`Goggles (3)`、および `Mask (4)` が含まれます。 `bbox`フィールドが COCO 形式に従っていることに気づくかもしれません。これは DETR モデルが予期する形式です。 ただし、「オブジェクト」内のフィールドのグループ化は、DETR が必要とする注釈形式とは異なります。あなたはするであろう このデータをトレーニングに使用する前に、いくつかの前処理変換を適用する必要があります。 データをさらに深く理解するには、データセット内の例を視覚化します。 ```py >>> import 
numpy as np >>> import os >>> from PIL import Image, ImageDraw >>> image = cppe5["train"][0]["image"] >>> annotations = cppe5["train"][0]["objects"] >>> draw = ImageDraw.Draw(image) >>> categories = cppe5["train"].features["objects"].feature["category"].names >>> id2label = {index: x for index, x in enumerate(categories, start=0)} >>> label2id = {v: k for k, v in id2label.items()} >>> for i in range(len(annotations["id"])): ... box = annotations["bbox"][i] ... class_idx = annotations["category"][i] ... x, y, w, h = tuple(box) ... draw.rectangle((x, y, x + w, y + h), outline="red", width=1) ... draw.text((x, y), id2label[class_idx], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/TdaqPJO.png" alt="CPPE-5 Image Example"/> </div> 関連付けられたラベルを使用して境界ボックスを視覚化するには、データセットのメタデータからラベルを取得します。 `category`フィールド。 また、ラベル ID をラベル クラスにマッピングする辞書 (`id2label`) やその逆 (`label2id`) を作成することもできます。 これらは、後でモデルをセットアップするときに使用できます。これらのマップを含めると、共有した場合に他の人がモデルを再利用できるようになります。 ハグフェイスハブに取り付けます。 データに慣れるための最後のステップとして、潜在的な問題がないかデータを調査します。データセットに関する一般的な問題の 1 つは、 オブジェクト検出は、画像の端を越えて「伸びる」境界ボックスです。このような「暴走」境界ボックスは、 トレーニング中にエラーが発生するため、この段階で対処する必要があります。このデータセットには、この問題に関する例がいくつかあります。 このガイドでは内容をわかりやすくするために、これらの画像をデータから削除します。 ```py >>> remove_idx = [590, 821, 822, 875, 876, 878, 879] >>> keep = [i for i in range(len(cppe5["train"])) if i not in remove_idx] >>> cppe5["train"] = cppe5["train"].select(keep) ``` ## Preprocess the data モデルを微調整するには、事前トレーニングされたモデルに使用されるアプローチと正確に一致するように、使用する予定のデータを前処理する必要があります。 [`AutoImageProcessor`] は、画像データを処理して `pixel_values`、`pixel_mask`、および DETR モデルをトレーニングできる「ラベル」。画像プロセッサには、心配する必要のないいくつかの属性があります。 - `image_mean = [0.485, 0.456, 0.406 ]` - `image_std = [0.229, 0.224, 0.225]` これらは、モデルの事前トレーニング中に画像を正規化するために使用される平均と標準偏差です。これらの価値観は非常に重要です 事前にトレーニングされた画像モデルを推論または微調整するときに複製します。 微調整するモデルと同じチェックポイントからイメージ プロセッサをインスタンス化します。 ```py >>> from transformers import AutoImageProcessor >>> checkpoint = "facebook/detr-resnet-50" >>> image_processor = AutoImageProcessor.from_pretrained(checkpoint) ``` 画像を`image_processor`に渡す前に、2 つの前処理変換をデータセットに適用します。 - 画像の拡張 - DETR の期待に応えるための注釈の再フォーマット まず、モデルがトレーニング データにオーバーフィットしないようにするために、任意のデータ拡張ライブラリを使用して画像拡張を適用できます。ここでは[Albumentations](https://albumentations.ai/docs/)を使用します... このライブラリは、変換が画像に影響を与え、それに応じて境界ボックスを更新することを保証します。 🤗 データセット ライブラリのドキュメントには、詳細な [物体検出用に画像を拡張する方法に関するガイド](https://huggingface.co/docs/datasets/object_detection) が記載されています。 例としてまったく同じデータセットを使用しています。ここでも同じアプローチを適用し、各画像のサイズを (480, 480) に変更します。 水平に反転して明るくします。 ```py >>> import albumentations >>> import numpy as np >>> import torch >>> transform = albumentations.Compose( ... [ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], ... bbox_params=albumentations.BboxParams(format="coco", label_fields=["category"]), ... ) ``` `image_processor` は、注釈が次の形式であることを期待します: `{'image_id': int, 'annotations': List[Dict]}`, ここで、各辞書は COCO オブジェクトの注釈です。 1 つの例として、注釈を再フォーマットする関数を追加してみましょう。 ```py >>> def formatted_anns(image_id, category, area, bbox): ... annotations = [] ... for i in range(0, len(category)): ... new_ann = { ... "image_id": image_id, ... "category_id": category[i], ... "isCrowd": 0, ... "area": area[i], ... "bbox": list(bbox[i]), ... } ... annotations.append(new_ann) ... return annotations ``` これで、画像と注釈の変換を組み合わせてサンプルのバッチで使用できるようになりました。 ```py >>> # transforming a batch >>> def transform_aug_ann(examples): ... image_ids = examples["image_id"] ... images, bboxes, area, categories = [], [], [], [] ... 
for image, objects in zip(examples["image"], examples["objects"]): ... image = np.array(image.convert("RGB"))[:, :, ::-1] ... out = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) ... area.append(objects["area"]) ... images.append(out["image"]) ... bboxes.append(out["bboxes"]) ... categories.append(out["category"]) ... targets = [ ... {"image_id": id_, "annotations": formatted_anns(id_, cat_, ar_, box_)} ... for id_, cat_, ar_, box_ in zip(image_ids, categories, area, bboxes) ... ] ... return image_processor(images=images, annotations=targets, return_tensors="pt") ``` 🤗 Datasets [`~datasets.Dataset.with_transform`] メソッドを使用して、この前処理関数をデータセット全体に適用します。この方法が適用されるのは、 データセットの要素を読み込むときに、その場で変換します。 この時点で、データセットの例が変換後にどのようになるかを確認できます。テンソルが表示されるはずです `pixel_values`、テンソルと `pixel_mask`、および `labels` を使用します。 ```py >>> cppe5["train"] = cppe5["train"].with_transform(transform_aug_ann) >>> cppe5["train"][15] {'pixel_values': tensor([[[ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9809, -1.9809, -1.9809], [ 0.9132, 0.9132, 0.9132, ..., -1.9638, -1.9638, -1.9638], ..., [-1.5699, -1.5699, -1.5699, ..., -1.9980, -1.9980, -1.9980], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809], [-1.5528, -1.5528, -1.5528, ..., -1.9980, -1.9809, -1.9809]], [[ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8431, -1.8431, -1.8431], [ 1.3081, 1.3081, 1.3081, ..., -1.8256, -1.8256, -1.8256], ..., [-1.3179, -1.3179, -1.3179, ..., -1.8606, -1.8606, -1.8606], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431], [-1.3004, -1.3004, -1.3004, ..., -1.8606, -1.8431, -1.8431]], [[ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6476, -1.6476, -1.6476], [ 1.4200, 1.4200, 1.4200, ..., -1.6302, -1.6302, -1.6302], ..., [-1.0201, -1.0201, -1.0201, ..., -1.5604, -1.5604, -1.5604], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430], [-1.0027, -1.0027, -1.0027, ..., -1.5604, -1.5430, -1.5430]]]), 'pixel_mask': tensor([[1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], ..., [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 1, 1, 1]]), 'labels': {'size': tensor([800, 800]), 'image_id': tensor([756]), 'class_labels': tensor([4]), 'boxes': tensor([[0.7340, 0.6986, 0.3414, 0.5944]]), 'area': tensor([519544.4375]), 'iscrowd': tensor([0]), 'orig_size': tensor([480, 480])}} ``` 個々の画像を正常に拡張し、それらの注釈を準備しました。ただし、前処理はそうではありません。 まだ完成しています。最後のステップでは、画像をバッチ処理するためのカスタム `collat​​e_fn` を作成します。 画像 (現在は `pixel_values`) をバッチ内の最大の画像にパディングし、対応する `pixel_mask` を作成します どのピクセルが実数 (1) で、どのピクセルがパディング (0) であるかを示します。 ```py >>> def collate_fn(batch): ... pixel_values = [item["pixel_values"] for item in batch] ... encoding = image_processor.pad(pixel_values, return_tensors="pt") ... labels = [item["labels"] for item in batch] ... batch = {} ... batch["pixel_values"] = encoding["pixel_values"] ... batch["pixel_mask"] = encoding["pixel_mask"] ... batch["labels"] = labels ... return batch ``` ## Training the DETR model 前のセクションで重労働のほとんどを完了したので、モデルをトレーニングする準備が整いました。 このデータセット内の画像は、サイズを変更した後でも依然として非常に大きいです。これは、このモデルを微調整すると、 少なくとも 1 つの GPU が必要です。 トレーニングには次の手順が含まれます。 1. 前処理と同じチェックポイントを使用して、[`AutoModelForObjectDetection`] でモデルを読み込みます。 2. [`TrainingArguments`] でトレーニング ハイパーパラメータを定義します。 3. トレーニング引数をモデル、データセット、画像プロセッサ、データ照合器とともに [`Trainer`] に渡します。 4. 
[`~Trainer.train`] を呼び出してモデルを微調整します。 前処理に使用したのと同じチェックポイントからモデルをロードするときは、必ず`label2id`を渡してください。 および `id2label` マップは、以前にデータセットのメタデータから作成したものです。さらに、`ignore_mismatched_sizes=True`を指定して、既存の分類頭部を新しい分類頭部に置き換えます。 ```py >>> from transformers import AutoModelForObjectDetection >>> model = AutoModelForObjectDetection.from_pretrained( ... checkpoint, ... id2label=id2label, ... label2id=label2id, ... ignore_mismatched_sizes=True, ... ) ``` [`TrainingArguments`] で、`output_dir` を使用してモデルの保存場所を指定し、必要に応じてハイパーパラメーターを構成します。 画像列が削除されるため、未使用の列を削除しないことが重要です。画像列がないと、 `pixel_values` を作成できません。このため、`remove_unused_columns`を`False`に設定します。 ハブにプッシュしてモデルを共有したい場合は、`push_to_hub` を `True` に設定します (Hugging にサインインする必要があります) 顔に向かってモデルをアップロードします)。 ```py >>> from transformers import TrainingArguments >>> training_args = TrainingArguments( ... output_dir="detr-resnet-50_finetuned_cppe5", ... per_device_train_batch_size=8, ... num_train_epochs=10, ... fp16=True, ... save_steps=200, ... logging_steps=50, ... learning_rate=1e-5, ... weight_decay=1e-4, ... save_total_limit=2, ... remove_unused_columns=False, ... push_to_hub=True, ... ) ``` 最後に、すべてをまとめて、[`~transformers.Trainer.train`] を呼び出します。 ```py >>> from transformers import Trainer >>> trainer = Trainer( ... model=model, ... args=training_args, ... data_collator=collate_fn, ... train_dataset=cppe5["train"], ... processing_class=image_processor, ... ) >>> trainer.train() ``` `training_args`で`push_to_hub`を`True`に設定した場合、トレーニング チェックポイントは ハグフェイスハブ。トレーニングが完了したら、[`~transformers.Trainer.push_to_hub`] メソッドを呼び出して、最終モデルもハブにプッシュします。 ```py >>> trainer.push_to_hub() ``` ## Evaluate 物体検出モデルは通常、一連の <a href="https://cocodataset.org/#detection-eval">COCO スタイルの指標</a>を使用して評価されます。 既存のメトリクス実装のいずれかを使用できますが、ここでは`torchvision`のメトリクス実装を使用して最終的なメトリクスを評価します。 ハブにプッシュしたモデル。 `torchvision`エバリュエーターを使用するには、グラウンド トゥルース COCO データセットを準備する必要があります。 COCO データセットを構築するための API データを特定の形式で保存する必要があるため、最初に画像と注釈をディスクに保存する必要があります。と同じように トレーニング用にデータを準備するとき、`cppe5["test"]` からの注釈をフォーマットする必要があります。ただし、画像 そのままでいるべきです。 評価ステップには少し作業が必要ですが、大きく 3 つのステップに分けることができます。 まず、`cppe5["test"]` セットを準備します。注釈をフォーマットし、データをディスクに保存します。 ```py >>> import json >>> # format annotations the same as for training, no need for data augmentation >>> def val_formatted_anns(image_id, objects): ... annotations = [] ... for i in range(0, len(objects["id"])): ... new_ann = { ... "id": objects["id"][i], ... "category_id": objects["category"][i], ... "iscrowd": 0, ... "image_id": image_id, ... "area": objects["area"][i], ... "bbox": objects["bbox"][i], ... } ... annotations.append(new_ann) ... return annotations >>> # Save images and annotations into the files torchvision.datasets.CocoDetection expects >>> def save_cppe5_annotation_file_images(cppe5): ... output_json = {} ... path_output_cppe5 = f"{os.getcwd()}/cppe5/" ... if not os.path.exists(path_output_cppe5): ... os.makedirs(path_output_cppe5) ... path_anno = os.path.join(path_output_cppe5, "cppe5_ann.json") ... categories_json = [{"supercategory": "none", "id": id, "name": id2label[id]} for id in id2label] ... output_json["images"] = [] ... output_json["annotations"] = [] ... for example in cppe5: ... ann = val_formatted_anns(example["image_id"], example["objects"]) ... output_json["images"].append( ... { ... "id": example["image_id"], ... "width": example["image"].width, ... "height": example["image"].height, ... "file_name": f"{example['image_id']}.png", ... } ... ) ... output_json["annotations"].extend(ann) ... output_json["categories"] = categories_json ... with open(path_anno, "w") as file: ... 
json.dump(output_json, file, ensure_ascii=False, indent=4) ... for im, img_id in zip(cppe5["image"], cppe5["image_id"]): ... path_img = os.path.join(path_output_cppe5, f"{img_id}.png") ... im.save(path_img) ... return path_output_cppe5, path_anno ``` 次に、`cocoevaluator`で利用できる`CocoDetection`クラスのインスタンスを用意します。 ```py >>> import torchvision >>> class CocoDetection(torchvision.datasets.CocoDetection): ... def __init__(self, img_folder, image_processor, ann_file): ... super().__init__(img_folder, ann_file) ... self.image_processor = image_processor ... def __getitem__(self, idx): ... # read in PIL image and target in COCO format ... img, target = super(CocoDetection, self).__getitem__(idx) ... # preprocess image and target: converting target to DETR format, ... # resizing + normalization of both image and target) ... image_id = self.ids[idx] ... target = {"image_id": image_id, "annotations": target} ... encoding = self.image_processor(images=img, annotations=target, return_tensors="pt") ... pixel_values = encoding["pixel_values"].squeeze() # remove batch dimension ... target = encoding["labels"][0] # remove batch dimension ... return {"pixel_values": pixel_values, "labels": target} >>> im_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> path_output_cppe5, path_anno = save_cppe5_annotation_file_images(cppe5["test"]) >>> test_ds_coco_format = CocoDetection(path_output_cppe5, im_processor, path_anno) ``` 最後に、メトリクスをロードして評価を実行します。 ```py >>> import evaluate >>> from tqdm import tqdm >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> module = evaluate.load("ybelkada/cocoevaluate", coco=test_ds_coco_format.coco) >>> val_dataloader = torch.utils.data.DataLoader( ... test_ds_coco_format, batch_size=8, shuffle=False, num_workers=4, collate_fn=collate_fn ... ) >>> with torch.no_grad(): ... for idx, batch in enumerate(tqdm(val_dataloader)): ... pixel_values = batch["pixel_values"] ... pixel_mask = batch["pixel_mask"] ... labels = [ ... {k: v for k, v in t.items()} for t in batch["labels"] ... ] # these are in DETR format, resized + normalized ... # forward pass ... outputs = model(pixel_values=pixel_values, pixel_mask=pixel_mask) ... orig_target_sizes = torch.stack([target["orig_size"] for target in labels], dim=0) ... results = im_processor.post_process(outputs, orig_target_sizes) # convert outputs of model to Pascal VOC format (xmin, ymin, xmax, ymax) ... module.add(prediction=results, reference=labels) ... del batch >>> results = module.compute() >>> print(results) Accumulating evaluation results... DONE (t=0.08s). 
IoU metric: bbox Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.352 Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.681 Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.292 Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.168 Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.208 Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.429 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.274 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.484 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.191 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.323 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.590 ``` これらの結果は、[`~transformers.TrainingArguments`] のハイパーパラメータを調整することでさらに改善できます。試してごらん! ## Inference DETR モデルを微調整して評価し、Hugging Face Hub にアップロードしたので、それを推論に使用できます。 推論用に微調整されたモデルを試す最も簡単な方法は、それを [`pipeline`] で使用することです。パイプラインをインスタンス化する モデルを使用してオブジェクトを検出し、それに画像を渡します。 ```py >>> from transformers import pipeline >>> import requests >>> url = "https://i.imgur.com/2lnWoly.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> obj_detector = pipeline("object-detection", model="devonho/detr-resnet-50_finetuned_cppe5") >>> obj_detector(image) ``` 必要に応じて、パイプラインの結果を手動で複製することもできます。 ```py >>> image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5") >>> with torch.no_grad(): ... inputs = image_processor(images=image, return_tensors="pt") ... outputs = model(**inputs) ... target_sizes = torch.tensor([image.size[::-1]]) ... results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... ) Detected Coverall with confidence 0.566 at location [1215.32, 147.38, 4401.81, 3227.08] Detected Mask with confidence 0.584 at location [2449.06, 823.19, 3256.43, 1413.9] ``` 結果をプロットしてみましょう: ```py >>> draw = ImageDraw.Draw(image) >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... x, y, x2, y2 = tuple(box) ... draw.rectangle((x, y, x2, y2), outline="red", width=1) ... draw.text((x, y), model.config.id2label[label.item()], fill="white") >>> image ``` <div class="flex justify-center"> <img src="https://i.imgur.com/4QZnf9A.png" alt="Object detection result on a new image"/> </div>
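パイプラインの代わりに、CPPE-5 のテストセットの画像に対して直接推論を実行することもできます。以下は最小限のスケッチであり、このガイドで使用したチェックポイント `devonho/detr-resnet-50_finetuned_cppe5` を想定しています(ご自身で微調整したチェックポイントに置き換えても構いません)。

```py
>>> import torch
>>> from transformers import AutoImageProcessor, AutoModelForObjectDetection

>>> image_processor = AutoImageProcessor.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")
>>> model = AutoModelForObjectDetection.from_pretrained("devonho/detr-resnet-50_finetuned_cppe5")

>>> # テストセットには変換を適用していないため、`image` はそのまま PIL 画像です
>>> test_image = cppe5["test"][5]["image"]

>>> with torch.no_grad():
...     inputs = image_processor(images=test_image, return_tensors="pt")
...     outputs = model(**inputs)
...     target_sizes = torch.tensor([test_image.size[::-1]])
...     results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]

>>> for score, label in zip(results["scores"], results["labels"]):
...     print(model.config.id2label[label.item()], round(score.item(), 3))
```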
transformers/docs/source/ja/tasks/object_detection.md/0
{ "file_path": "transformers/docs/source/ja/tasks/object_detection.md", "repo_id": "transformers", "token_count": 12694 }
41
<!--Copyright 2021 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 특성 추출기 [[feature-extractor]] 특성 추출기는 오디오 또는 비전 모델을 위한 입력 특성을 준비하는 역할을 합니다. 여기에는 시퀀스에서 특성을 추출하는 작업(예를 들어, 오디오 파일을 전처리하여 Log-Mel 스펙트로그램 특성을 생성하는 것), 이미지에서 특성을 추출하는 작업(예를 들어, 이미지 파일을 자르는 것)이 포함됩니다. 뿐만 아니라 패딩, 정규화 및 NumPy, PyTorch, TensorFlow 텐서로의 변환도 포함됩니다. ## FeatureExtractionMixin [[transformers.FeatureExtractionMixin]] [[autodoc]] feature_extraction_utils.FeatureExtractionMixin - from_pretrained - save_pretrained ## SequenceFeatureExtractor [[transformers.SequenceFeatureExtractor]] [[autodoc]] SequenceFeatureExtractor - pad ## BatchFeature [[transformers.BatchFeature]] [[autodoc]] BatchFeature ## ImageFeatureExtractionMixin [[transformers.ImageFeatureExtractionMixin]] [[autodoc]] image_utils.ImageFeatureExtractionMixin
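아래는 이 페이지에서 다루는 클래스들이 실제로 어떻게 동작하는지 보여주는 최소한의 사용 예시입니다. 체크포인트 이름과 입력 데이터는 설명을 위한 가정이며, 실제로는 사용하려는 모델에 맞는 체크포인트를 지정해야 합니다.

```python
import numpy as np
from transformers import AutoFeatureExtractor

# 예시용 체크포인트(가정): Wav2Vec2 계열 오디오 모델의 특성 추출기
feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")

# 길이가 서로 다른 두 개의 임의 오디오 시퀀스 (16kHz 샘플링 가정)
raw_audio = [np.random.randn(16000).astype(np.float32), np.random.randn(24000).astype(np.float32)]

# 패딩과 PyTorch 텐서 변환을 한 번에 수행하여 BatchFeature를 반환합니다
inputs = feature_extractor(raw_audio, sampling_rate=16000, padding=True, return_tensors="pt")
print(inputs["input_values"].shape)  # torch.Size([2, 24000])
```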
transformers/docs/source/ko/main_classes/feature_extractor.md/0
{ "file_path": "transformers/docs/source/ko/main_classes/feature_extractor.md", "repo_id": "transformers", "token_count": 779 }
42
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # BERT[[BERT]] <div class="flex flex-wrap space-x-1"> <a href="https://huggingface.co/models?filter=bert"> <img alt="Models" src="https://img.shields.io/badge/All_model_pages-bert-blueviolet"> </a> <a href="https://huggingface.co/spaces/docs-demos/bert-base-uncased"> <img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue"> </a> </div> ## 개요[[Overview]] BERT 모델은 Jacob Devlin. Ming-Wei Chang, Kenton Lee, Kristina Touranova가 제안한 논문 [BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)에서 소개되었습니다. BERT는 사전 학습된 양방향 트랜스포머로, Toronto Book Corpus와 Wikipedia로 구성된 대규모 코퍼스에서 마스킹된 언어 모델링과 다음 문장 예측(Next Sentence Prediction) 목표를 결합해 학습되었습니다. 해당 논문의 초록입니다: *우리는 BERT(Bidirectional Encoder Representations from Transformers)라는 새로운 언어 표현 모델을 소개합니다. 최근의 다른 언어 표현 모델들과 달리, BERT는 모든 계층에서 양방향으로 양쪽 문맥을 조건으로 사용하여 비지도 학습된 텍스트에서 깊이 있는 양방향 표현을 사전 학습하도록 설계되었습니다. 그 결과, 사전 학습된 BERT 모델은 추가적인 출력 계층 하나만으로 질문 응답, 언어 추론과 같은 다양한 작업에서 미세 조정될 수 있으므로, 특정 작업을 위해 아키텍처를 수정할 필요가 없습니다.* *BERT는 개념적으로 단순하면서도 실증적으로 강력한 모델입니다. BERT는 11개의 자연어 처리 과제에서 새로운 최고 성능을 달성했으며, GLUE 점수를 80.5% (7.7% 포인트 절대 개선)로, MultiNLI 정확도를 86.7% (4.6% 포인트 절대 개선), SQuAD v1.1 질문 응답 테스트에서 F1 점수를 93.2 (1.5% 포인트 절대 개선)로, SQuAD v2.0에서 F1 점수를 83.1 (5.1% 포인트 절대 개선)로 향상시켰습니다.* 이 모델은 [thomwolf](https://huggingface.co/thomwolf)가 기여하였습니다. 원본 코드는 [여기](https://github.com/google-research/bert)에서 확인할 수 있습니다. ## 사용 팁[[Usage tips]] - BERT는 절대 위치 임베딩을 사용하는 모델이므로 입력을 왼쪽이 아니라 오른쪽에서 패딩하는 것이 일반적으로 권장됩니다. - BERT는 마스킹된 언어 모델(MLM)과 Next Sentence Prediction(NSP) 목표로 학습되었습니다. 이는 마스킹된 토큰 예측과 전반적인 자연어 이해(NLU)에 뛰어나지만, 텍스트 생성에는 최적화되어있지 않습니다. - BERT의 사전 학습 과정에서는 입력 데이터를 무작위로 마스킹하여 일부 토큰을 마스킹합니다. 전체 토큰 중 약 15%가 다음과 같은 방식으로 마스킹됩니다: * 80% 확률로 마스크 토큰으로 대체 * 10% 확률로 임의의 다른 토큰으로 대체 * 10% 확률로 원래 토큰 그대로 유지 - 모델의 주요 목표는 원본 문장을 예측하는 것이지만, 두 번째 목표가 있습니다: 입력으로 문장 A와 B (사이에는 구분 토큰이 있음)가 주어집니다. 이 문장 쌍이 연속될 확률은 50%이며, 나머지 50%는 서로 무관한 문장들입니다. 모델은 이 두 문장이 아닌지를 예측해야 합니다. ### Scaled Dot Product Attention(SDPA) 사용하기 [[Using Scaled Dot Product Attention (SDPA)]] Pytorch는 `torch.nn.functional`의 일부로 Scaled Dot Product Attention(SDPA) 연산자를 기본적으로 제공합니다. 이 함수는 입력과 하드웨어에 따라 여러 구현 방식을 사용할 수 있습니다. 자세한 내용은 [공식 문서](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)나 [GPU Inference](https://huggingface.co/docs/transformers/main/en/perf_infer_gpu_one#pytorch-scaled-dot-product-attention)에서 확인할 수 있습니다. `torch>=2.1.1`에서는 구현이 가능한 경우 SDPA가 기본적으로 사용되지만, `from_pretrained()`함수에서 `attn_implementation="sdpa"`를 설정하여 SDPA를 명시적으로 사용하도록 지정할 수도 있습니다. ``` from transformers import BertModel model = BertModel.from_pretrained("bert-base-uncased", torch_dtype=torch.float16, attn_implementation="sdpa") ... 
``` 최적 성능 향상을 위해 모델을 반정밀도(예: `torch.float16` 또는 `torch.bfloat16`)로 불러오는 것을 권장합니다. 로컬 벤치마크 (A100-80GB, CPUx12, RAM 96.6GB, PyTorch 2.2.0, OS Ubuntu 22.04)에서 `float16`을 사용해 학습 및 추론을 수행한 결과, 다음과 같은 속도 향상이 관찰되었습니다. #### 학습 [[Training]] |batch_size|seq_len|Time per batch (eager - s)|Time per batch (sdpa - s)|Speedup (%)|Eager peak mem (MB)|sdpa peak mem (MB)|Mem saving (%)| |----------|-------|--------------------------|-------------------------|-----------|-------------------|------------------|--------------| |4 |256 |0.023 |0.017 |35.472 |939.213 |764.834 |22.800 | |4 |512 |0.023 |0.018 |23.687 |1970.447 |1227.162 |60.569 | |8 |256 |0.023 |0.018 |23.491 |1594.295 |1226.114 |30.028 | |8 |512 |0.035 |0.025 |43.058 |3629.401 |2134.262 |70.054 | |16 |256 |0.030 |0.024 |25.583 |2874.426 |2134.262 |34.680 | |16 |512 |0.064 |0.044 |46.223 |6964.659 |3961.013 |75.830 | #### 추론 [[Inference]] |batch_size|seq_len|Per token latency eager (ms)|Per token latency SDPA (ms)|Speedup (%)|Mem eager (MB)|Mem BT (MB)|Mem saved (%)| |----------|-------|----------------------------|---------------------------|-----------|--------------|-----------|-------------| |1 |128 |5.736 |4.987 |15.022 |282.661 |282.924 |-0.093 | |1 |256 |5.689 |4.945 |15.055 |298.686 |298.948 |-0.088 | |2 |128 |6.154 |4.982 |23.521 |314.523 |314.785 |-0.083 | |2 |256 |6.201 |4.949 |25.303 |347.546 |347.033 |0.148 | |4 |128 |6.049 |4.987 |21.305 |378.895 |379.301 |-0.107 | |4 |256 |6.285 |5.364 |17.166 |443.209 |444.382 |-0.264 | ## 자료[[Resources]] BERT를 시작하는 데 도움이 되는 Hugging Face와 community 자료 목록(🌎로 표시됨) 입니다. 여기에 포함될 자료를 제출하고 싶다면 PR(Pull Request)를 열어주세요. 리뷰 해드리겠습니다! 자료는 기존 자료를 복제하는 대신 새로운 내용을 담고 있어야 합니다. <PipelineTag pipeline="text-classification"/> - [BERT 텍스트 분류 (다른 언어로)](https://www.philschmid.de/bert-text-classification-in-a-different-language)에 대한 블로그 포스트. - [다중 레이블 텍스트 분류를 위한 BERT (및 관련 모델) 미세 조정](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Fine_tuning_BERT_(and_friends)_for_multi_label_text_classification.ipynb)에 대한 노트북. - [PyTorch를 이용해 BERT를 다중 레이블 분류를 위해 미세 조정하는 방법](htt기ps://colab.research.google.com/github/abhimishra91/transformers-tutorials/blob/master/transformers_multi_label_classification.ipynb)에 대한 노트북. 🌎 - [BERT로 EncoderDecoder 모델을 warm-start하여 요약하기](https://colab.research.google.com/github/patrickvonplaten/notebooks/blob/master/BERT2BERT_for_CNN_Dailymail.ipynb)에 대한 노트북. - [`BertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification.ipynb)에서 지원됩니다. - [`TFBertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification-tf.ipynb)에서 지원됩니다. - [`FlaxBertForSequenceClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/text_classification_flax.ipynb)에서 지원됩니다. - [텍스트 분류 작업 가이드](../tasks/sequence_classification) <PipelineTag pipeline="token-classification"/> - [Keras와 함께 Hugging Face Transformers를 사용하여 비영리 BERT를 개체명 인식(NER)용으로 미세 조정하는 방법](https://www.philschmid.de/huggingface-transformers-keras-tf)에 대한 블로그 포스트. 
- [BERT를 개체명 인식을 위해 미세 조정하기](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT_only_first_wordpiece.ipynb)에 대한 노트북. 각 단어의 첫 번째 wordpiece에만 레이블을 지정하여 학습하는 방법을 설명합니다. 모든 wordpiece에 레이블을 전파하는 방법은 [이 버전](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/BERT/Custom_Named_Entity_Recognition_with_BERT.ipynb)에서 확인할 수 있습니다. - [`BertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification.ipynb)에서 지원됩니다. - [`TFBertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/token_classification-tf.ipynb)에서 지원됩니다. - [`FlaxBertForTokenClassification`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification)에서 지원됩니다. - 🤗 Hugging Face 코스의 [토큰 분류 챕터](https://huggingface.co/course/chapter7/2?fw=pt). - [토큰 분류 작업 가이드](../tasks/token_classification) <PipelineTag pipeline="fill-mask"/> - [`BertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling#robertabertdistilbert-and-masked-language-modeling)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling.ipynb)에서 지원됩니다. - [`TFBertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling#run_mlmpy) 와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/language_modeling-tf.ipynb)에서 지원됩니다. - [`FlaxBertForMaskedLM`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling#masked-language-modeling)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/masked_language_modeling_flax.ipynb)에서 지원됩니다. - 🤗 Hugging Face 코스의 [마스킹된 언어 모델링 챕터](https://huggingface.co/course/chapter7/3?fw=pt). - [마스킹된 언어 모델링 작업 가이드](../tasks/masked_language_modeling) <PipelineTag pipeline="question-answering"/> - [`BertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering.ipynb)에서 지원됩니다. - [`TFBertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) 와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb)에서 지원됩니다. - [`FlaxBertForQuestionAnswering`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering)에서 지원됩니다. - 🤗 Hugging Face 코스의 [질문 답변 챕터](https://huggingface.co/course/chapter7/7?fw=pt). - [질문 답변 작업 가이드](../tasks/question_answering) **다중 선택** - [`BertForMultipleChoice`]이 [예제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/pytorch/multiple-choice)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice.ipynb)에서 지원됩니다. 
- [`TFBertForMultipleChoice`]이 [에제 스크립트](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/multiple-choice)와 [노트북](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/multiple_choice-tf.ipynb)에서 지원됩니다. - [다중 선택 작업 가이드](../tasks/multiple_choice) ⚡️ **추론** - [Hugging Face Transformers와 AWS Inferentia를 사용하여 BERT 추론을 가속화하는 방법](https://huggingface.co/blog/bert-inferentia-sagemaker)에 대한 블로그 포스트. - [GPU에서 DeepSpeed-Inference로 BERT 추론을 가속화하는 방법](https://www.philschmid.de/bert-deepspeed-inference)에 대한 블로그 포스트. ⚙️ **사전 학습** - [Hugging Face Optimum으로 Transformers를 ONMX로 변환하는 방법](https://www.philschmid.de/pre-training-bert-habana)에 대한 블로그 포스트. 🚀 **배포** - [Hugging Face Optimum으로 Transformers를 ONMX로 변환하는 방법](https://www.philschmid.de/convert-transformers-to-onnx)에 대한 블로그 포스트. - [AWS에서 Hugging Face Transformers를 위한 Habana Gaudi 딥러닝 환경 설정 방법](https://www.philschmid.de/getting-started-habana-gaudi#conclusion)에 대한 블로그 포스트. - [Hugging Face Transformers, Amazon SageMaker 및 Terraform 모듈을 이용한 BERT 자동 확장](https://www.philschmid.de/terraform-huggingface-amazon-sagemaker-advanced)에 대한 블로그 포스트. - [Hugging Face, AWS Lambda, Docker를 활용하여 서버리스 BERT 설정하는 방법](https://www.philschmid.de/serverless-bert-with-huggingface-aws-lambda-docker)에 대한 블로그 포스트. - [Amazon SageMaker와 Training Compiler를 사용하여 Hugging Face Transformers에서 BERT 미세 조정하는 방법](https://www.philschmid.de/huggingface-amazon-sagemaker-training-compiler)에 대한 블로그. - [Amazon SageMaker를 사용한 Transformers와 BERT의 작업별 지식 증류](https://www.philschmid.de/knowledge-distillation-bert-transformers)에 대한 블로그 포스트. ## BertConfig [[autodoc]] BertConfig - all ## BertTokenizer [[autodoc]] BertTokenizer - build_inputs_with_special_tokens - get_special_tokens_mask - create_token_type_ids_from_sequences - save_vocabulary <frameworkcontent> <pt> ## BertTokenizerFast [[autodoc]] BertTokenizerFast </pt> <tf> ## TFBertTokenizer [[autodoc]] TFBertTokenizer </tf> </frameworkcontent> ## Bert specific outputs [[autodoc]] models.bert.modeling_bert.BertForPreTrainingOutput [[autodoc]] models.bert.modeling_tf_bert.TFBertForPreTrainingOutput [[autodoc]] models.bert.modeling_flax_bert.FlaxBertForPreTrainingOutput <frameworkcontent> <pt> ## BertModel [[autodoc]] BertModel - forward ## BertForPreTraining [[autodoc]] BertForPreTraining - forward ## BertLMHeadModel [[autodoc]] BertLMHeadModel - forward ## BertForMaskedLM [[autodoc]] BertForMaskedLM - forward ## BertForNextSentencePrediction [[autodoc]] BertForNextSentencePrediction - forward ## BertForSequenceClassification [[autodoc]] BertForSequenceClassification - forward ## BertForMultipleChoice [[autodoc]] BertForMultipleChoice - forward ## BertForTokenClassification [[autodoc]] BertForTokenClassification - forward ## BertForQuestionAnswering [[autodoc]] BertForQuestionAnswering - forward </pt> <tf> ## TFBertModel [[autodoc]] TFBertModel - call ## TFBertForPreTraining [[autodoc]] TFBertForPreTraining - call ## TFBertModelLMHeadModel [[autodoc]] TFBertLMHeadModel - call ## TFBertForMaskedLM [[autodoc]] TFBertForMaskedLM - call ## TFBertForNextSentencePrediction [[autodoc]] TFBertForNextSentencePrediction - call ## TFBertForSequenceClassification [[autodoc]] TFBertForSequenceClassification - call ## TFBertForMultipleChoice [[autodoc]] TFBertForMultipleChoice - call ## TFBertForTokenClassification [[autodoc]] TFBertForTokenClassification - call ## TFBertForQuestionAnswering [[autodoc]] TFBertForQuestionAnswering - call </tf> <jax> ## FlaxBertModel [[autodoc]] FlaxBertModel - __call__ ## 
FlaxBertForPreTraining [[autodoc]] FlaxBertForPreTraining - __call__ ## FlaxBertForCausalLM [[autodoc]] FlaxBertForCausalLM - __call__ ## FlaxBertForMaskedLM [[autodoc]] FlaxBertForMaskedLM - __call__ ## FlaxBertForNextSentencePrediction [[autodoc]] FlaxBertForNextSentencePrediction - __call__ ## FlaxBertForSequenceClassification [[autodoc]] FlaxBertForSequenceClassification - __call__ ## FlaxBertForMultipleChoice [[autodoc]] FlaxBertForMultipleChoice - __call__ ## FlaxBertForTokenClassification [[autodoc]] FlaxBertForTokenClassification - __call__ ## FlaxBertForQuestionAnswering [[autodoc]] FlaxBertForQuestionAnswering - __call__ </jax> </frameworkcontent>
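참고로, 위 '사용 팁'에서 설명한 마스킹된 언어 모델링 동작은 `fill-mask` 파이프라인으로 간단히 확인해 볼 수 있습니다. 아래는 최소한의 스케치이며, 체크포인트 이름은 예시로 가정한 것입니다.

```python
from transformers import pipeline

# [MASK] 위치에 들어갈 토큰을 예측하는 fill-mask 파이프라인 (예시 체크포인트 가정)
unmasker = pipeline("fill-mask", model="google-bert/bert-base-uncased")
for prediction in unmasker("The capital of France is [MASK]."):
    print(prediction["token_str"], round(prediction["score"], 3))
```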
transformers/docs/source/ko/model_doc/bert.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/bert.md", "repo_id": "transformers", "token_count": 10657 }
43
<!--Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# GPT-NeoX-Japanese [[gpt-neox-japanese]]

## 개요 [[overview]]

일본어를 위한 자동회귀 언어 모델인 GPT-NeoX-Japanese를 소개합니다. 이 모델은 [https://github.com/EleutherAI/gpt-neox](https://github.com/EleutherAI/gpt-neox)에서 학습되었습니다. 일본어는 많은 어휘와 히라가나, 가타카나, 한자의 조합으로 이루어진 독특한 언어입니다. 이러한 일본어의 독특한 구조를 해결하기 위해 [특수 서브워드 토크나이저](https://github.com/tanreinama/Japanese-BPEEncoder_V2)를 사용했습니다. 이 유용한 토크나이저를 오픈소스로 제공해 준 *tanreinama*에게 매우 감사드립니다.

이 모델은 Google의 [PaLM](https://ai.googleblog.com/2022/04/pathways-language-model-palm-scaling-to.html) 연구 권장 사항을 따르며, 트랜스포머 블록에서 편향 파라미터를 제거하여 모델 성능을 향상시켰습니다. 자세한 내용은 [이 기사](https://medium.com/ml-abeja/training-a-better-gpt-2-93b157662ae4)를 참조하세요.

모델 개발은 [ABEJA, Inc.](https://www.abejainc.com/)의 [신야 오타니](https://github.com/SO0529), [타카요시 마카베](https://github.com/spider-man-tm), [안주 아로라](https://github.com/Anuj040), [쿄 하토리](https://github.com/go5paopao)에 의해 주도되었습니다. 이 모델 개발 활동에 대한 자세한 내용은 [여기](https://tech-blog.abeja.asia/entry/abeja-gpt-project-202207)를 참조하세요.

### 사용 예시 [[usage-example]]

`generate()` 메서드를 사용하면 GPT NeoX Japanese 모델을 통해 텍스트를 생성할 수 있습니다.

```python
>>> from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseTokenizer

>>> model = GPTNeoXJapaneseForCausalLM.from_pretrained("abeja/gpt-neox-japanese-2.7b")
>>> tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")

>>> prompt = "人とAIが協調するためには、"

>>> input_ids = tokenizer(prompt, return_tensors="pt").input_ids

>>> gen_tokens = model.generate(
...     input_ids,
...     do_sample=True,
...     temperature=0.9,
...     max_length=100,
... )

>>> gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]

>>> print(gen_text)
人とAIが協調するためには、AIと人が共存し、AIを正しく理解する必要があります。
```

## 자료 [[resources]]

- [인과 언어 모델링 작업 가이드](../tasks/language_modeling)

## GPTNeoXJapanese 설정 (GPTNeoXJapaneseConfig) [[transformers.GPTNeoXJapaneseConfig]]

[[autodoc]] GPTNeoXJapaneseConfig

## GPTNeoXJapanese 토크나이저 (GPTNeoXJapaneseTokenizer) [[transformers.GPTNeoXJapaneseTokenizer]]

[[autodoc]] GPTNeoXJapaneseTokenizer

## GPTNeoXJapaneseModel [[transformers.GPTNeoXJapaneseModel]]

[[autodoc]] GPTNeoXJapaneseModel
    - forward

## 인과 언어 모델링을 위한 GPTNeoXJapanese (GPTNeoXJapaneseForCausalLM) [[transformers.GPTNeoXJapaneseForCausalLM]]

[[autodoc]] GPTNeoXJapaneseForCausalLM
    - forward
transformers/docs/source/ko/model_doc/gpt_neox_japanese.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/gpt_neox_japanese.md", "repo_id": "transformers", "token_count": 1909 }
44
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Swin2SR [[swin2sr]] ## 개요 [[overview]] Swin2SR 모델은 Marcos V. Conde, Ui-Jin Choi, Maxime Burchi, Radu Timofte가 제안한 논문 [Swin2SR: SwinV2 Transformer for Compressed Image Super-Resolution and Restoration](https://arxiv.org/abs/2209.11345)에서 소개되었습니다. Swin2SR은 [SwinIR](https://github.com/JingyunLiang/SwinIR/) 모델을 개선하고자 [Swin Transformer v2](swinv2) 레이어를 도입함으로써, 훈련 불안정성, 사전 훈련과 미세 조정 간의 해상도 차이, 그리고 데이터 의존성 문제를 완화시킵니다. 논문의 초록은 다음과 같습니다: *압축은 스트리밍 서비스, 가상 현실, 비디오 게임과 같은 대역폭이 제한된 시스템을 통해 이미지와 영상을 효율적으로 전송하고 저장하는 데 중요한 역할을 합니다. 하지만 압축은 필연적으로 원본 정보의 손실과 아티팩트를 초래하며, 이는 시각적 품질을 심각하게 저하시킬 수 있습니다. 이러한 이유로, 압축된 이미지의 품질 향상은 활발한 연구 주제가 되고 있습니다. 현재 대부분의 최첨단 이미지 복원 방법은 합성곱 신경망을 기반으로 하지만, SwinIR과 같은 트랜스포머 기반 방법들도 이 작업에서 인상적인 성능을 보여주고 있습니다. 이번 논문에서는 Swin Transformer V2를 사용해 SwinIR을 개선하여 이미지 초해상도 작업, 특히 압축된 입력 시나리오에서 성능을 향상시키고자 합니다. 이 방법을 통해 트랜스포머 비전 모델을 훈련할 때 발생하는 주요 문제들, 예를 들어 훈련 불안정성, 사전 훈련과 미세 조정 간 해상도 차이, 그리고 데이터 의존성을 해결할 수 있습니다. 우리는 JPEG 압축 아티팩트 제거, 이미지 초해상도(클래식 및 경량), 그리고 압축된 이미지 초해상도라는 세 가지 대표적인 작업에서 실험을 수행했습니다. 실험 결과, 우리의 방법인 Swin2SR은 SwinIR의 훈련 수렴성과 성능을 향상시킬 수 있으며, "AIM 2022 Challenge on Super-Resolution of Compressed Image and Video"에서 상위 5위 솔루션으로 선정되었습니다.* <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/swin2sr_architecture.png" alt="drawing" width="600"/> <small> Swin2SR 아키텍처. <a href="https://arxiv.org/abs/2209.11345">원본 논문</a>에서 발췌.</small> 이 모델은 [nielsr](https://huggingface.co/nielsr)가 기여하였습니다. 원본 코드는 [여기](https://github.com/mv-lab/swin2sr)에서 확인할 수 있습니다. ## 리소스 [[resources]] Swin2SR demo notebook은 [여기](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/Swin2SR)에서 확인할 수 있습니다. SwinSR을 활용한 image super-resolution demo space는 [여기](https://huggingface.co/spaces/jjourney1125/swin2sr)에서 확인할 수 있습니다. ## Swin2SRImageProcessor [[transformers.Swin2SRImageProcessor]] [[autodoc]] Swin2SRImageProcessor - preprocess ## Swin2SRConfig [[transformers.Swin2SRConfig]] [[autodoc]] Swin2SRConfig ## Swin2SRModel [[transformers.Swin2SRModel]] [[autodoc]] Swin2SRModel - forward ## Swin2SRForImageSuperResolution [[transformers.Swin2SRForImageSuperResolution]] [[autodoc]] Swin2SRForImageSuperResolution - forward
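추론 흐름을 코드로 확인하고 싶다면, 아래는 저해상도 이미지를 업스케일링하는 최소한의 스케치입니다. 체크포인트 이름과 이미지 URL은 예시로 가정한 것입니다.

```python
import torch
import numpy as np
import requests
from PIL import Image
from transformers import AutoImageProcessor, Swin2SRForImageSuperResolution

# 예시용 체크포인트(가정): 2배(x2) 클래식 초해상도용 Swin2SR
checkpoint = "caidas/swin2SR-classical-sr-x2-64"
processor = AutoImageProcessor.from_pretrained(checkpoint)
model = Swin2SRForImageSuperResolution.from_pretrained(checkpoint)

url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# 모델이 복원한 이미지(reconstruction)를 0~255 범위의 PIL 이미지로 변환
output = outputs.reconstruction.squeeze().clamp_(0, 1).numpy()
output = np.moveaxis(output, source=0, destination=-1)
upscaled = Image.fromarray((output * 255.0).round().astype(np.uint8))
```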
transformers/docs/source/ko/model_doc/swin2sr.md/0
{ "file_path": "transformers/docs/source/ko/model_doc/swin2sr.md", "repo_id": "transformers", "token_count": 2502 }
45
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # CPU에서 효율적인 추론하기 [[efficient-inference-on-cpu]] 이 가이드는 CPU에서 대규모 모델을 효율적으로 추론하는 방법에 중점을 두고 있습니다. ## 더 빠른 추론을 위한 `BetterTransformer` [[bettertransformer-for-faster-inference]] 우리는 최근 CPU에서 텍스트, 이미지 및 오디오 모델의 빠른 추론을 위해 `BetterTransformer`를 통합했습니다. 이 통합에 대한 더 자세한 내용은 [이 문서](https://huggingface.co/docs/optimum/bettertransformer/overview)를 참조하세요. ## PyTorch JIT 모드 (TorchScript) [[pytorch-jitmode-torchscript]] TorchScript는 PyTorch 코드에서 직렬화와 최적화가 가능한 모델을 생성할때 쓰입니다. TorchScript로 만들어진 프로그램은 기존 Python 프로세스에서 저장한 뒤, 종속성이 없는 새로운 프로세스로 가져올 수 있습니다. PyTorch의 기본 설정인 `eager` 모드와 비교했을때, `jit` 모드는 연산자 결합과 같은 최적화 방법론을 통해 모델 추론에서 대부분 더 나은 성능을 제공합니다. TorchScript에 대한 친절한 소개는 [PyTorch TorchScript 튜토리얼](https://pytorch.org/tutorials/beginner/Intro_to_TorchScript_tutorial.html#tracing-modules)을 참조하세요. ### JIT 모드와 함께하는 IPEX 그래프 최적화 [[ipex-graph-optimization-with-jitmode]] Intel® Extension for PyTorch(IPEX)는 Transformers 계열 모델의 jit 모드에서 추가적인 최적화를 제공합니다. jit 모드와 더불어 Intel® Extension for PyTorch(IPEX)를 활용하시길 강력히 권장드립니다. Transformers 모델에서 자주 사용되는 일부 연산자 패턴은 이미 jit 모드 연산자 결합(operator fusion)의 형태로 Intel® Extension for PyTorch(IPEX)에서 지원되고 있습니다. Multi-head-attention, Concat Linear, Linear+Add, Linear+Gelu, Add+LayerNorm 결합 패턴 등이 이용 가능하며 활용했을 때 성능이 우수합니다. 연산자 결합의 이점은 사용자에게 고스란히 전달됩니다. 분석에 따르면, 질의 응답, 텍스트 분류 및 토큰 분류와 같은 가장 인기 있는 NLP 태스크 중 약 70%가 이러한 결합 패턴을 사용하여 Float32 정밀도와 BFloat16 혼합 정밀도 모두에서 성능상의 이점을 얻을 수 있습니다. [IPEX 그래프 최적화](https://intel.github.io/intel-extension-for-pytorch/cpu/latest/tutorials/features/graph_optimization.html)에 대한 자세한 정보를 확인하세요. #### IPEX 설치: [[ipex-installation]] IPEX 배포 주기는 PyTorch를 따라서 이루어집니다. 자세한 정보는 [IPEX 설치 방법](https://intel.github.io/intel-extension-for-pytorch/)을 확인하세요. ### JIT 모드 사용법 [[usage-of-jitmode]] 평가 또는 예측을 위해 Trainer에서 JIT 모드를 사용하려면 Trainer의 명령 인수에 `jit_mode_eval`을 추가해야 합니다. <Tip warning={true}> PyTorch의 버전이 1.14.0 이상이라면, jit 모드는 jit.trace에서 dict 입력이 지원되므로, 모든 모델의 예측과 평가가 개선될 수 있습니다. PyTorch의 버전이 1.14.0 미만이라면, 질의 응답 모델과 같이 forward 매개변수의 순서가 jit.trace의 튜플 입력 순서와 일치하는 모델에 득이 될 수 있습니다. 텍스트 분류 모델과 같이 forward 매개변수 순서가 jit.trace의 튜플 입력 순서와 다른 경우, jit.trace가 실패하며 예외가 발생합니다. 이때 예외상황을 사용자에게 알리기 위해 Logging이 사용됩니다. </Tip> [Transformers 질의 응답](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)의 사용 사례 예시를 참조하세요. - CPU에서 jit 모드를 사용한 추론: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--jit_mode_eval </b></pre> - CPU에서 IPEX와 함께 jit 모드를 사용한 추론: <pre>python run_qa.py \ --model_name_or_path csarron/bert-base-uncased-squad-v1 \ --dataset_name squad \ --do_eval \ --max_seq_length 384 \ --doc_stride 128 \ --output_dir /tmp/ \ --no_cuda \ <b>--use_ipex \</b> <b>--jit_mode_eval</b></pre>
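[`Trainer`]를 거치지 않고 직접 추론 코드를 작성하는 경우에도 같은 아이디어를 적용할 수 있습니다. 아래는 IPEX 최적화와 TorchScript 트레이싱을 조합한 최소한의 스케치이며, 체크포인트 이름과 세부 설정은 예시로 가정한 것입니다.

```python
import torch
import intel_extension_for_pytorch as ipex
from transformers import AutoModelForSequenceClassification, AutoTokenizer

# 예시용 체크포인트(가정): 영어 감성 분류용 DistilBERT
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
# torchscript=True로 불러오면 모델이 튜플을 반환하므로 jit.trace에 적합합니다
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, torchscript=True)
model.eval()

# IPEX 연산자/그래프 최적화 적용
model = ipex.optimize(model)

inputs = tokenizer("This movie was surprisingly good!", return_tensors="pt")
with torch.no_grad():
    # forward 인자 순서(input_ids, attention_mask)에 맞춰 튜플 입력으로 트레이싱
    traced_model = torch.jit.trace(model, (inputs["input_ids"], inputs["attention_mask"]))
    traced_model = torch.jit.freeze(traced_model)
    logits = traced_model(inputs["input_ids"], inputs["attention_mask"])[0]

print(logits.softmax(dim=-1))
```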
transformers/docs/source/ko/perf_infer_cpu.md/0
{ "file_path": "transformers/docs/source/ko/perf_infer_cpu.md", "repo_id": "transformers", "token_count": 3075 }
46
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# EETQ [[eetq]]

[EETQ](https://github.com/NetEase-FuXi/EETQ) 라이브러리는 NVIDIA GPU에 대해 int8 채널별(per-channel) 가중치 전용 양자화(weight-only quantization)를 지원합니다. 고성능 GEMM 및 GEMV 커널은 FasterTransformer 및 TensorRT-LLM에서 가져왔습니다. 교정(calibration) 데이터셋이 필요 없으며, 모델을 사전에 양자화할 필요도 없습니다. 또한, 채널별 양자화(per-channel quantization) 덕분에 정확도 저하가 미미합니다.

[릴리스 페이지](https://github.com/NetEase-FuXi/EETQ/releases)에서 eetq를 설치했는지 확인하세요.

```
pip install --no-cache-dir https://github.com/NetEase-FuXi/EETQ/releases/download/v1.0.0/EETQ-1.0.0+cu121+torch2.1.2-cp310-cp310-linux_x86_64.whl
```

또는 소스 코드 https://github.com/NetEase-FuXi/EETQ 에서 설치할 수 있습니다. EETQ는 CUDA 연산 능력(compute capability)이 7.0 이상 8.9 이하인 GPU를 필요로 합니다.

```
git clone https://github.com/NetEase-FuXi/EETQ.git
cd EETQ/
git submodule update --init --recursive
pip install .
```

양자화되지 않은 모델은 `from_pretrained`를 통해 양자화할 수 있습니다.

```py
from transformers import AutoModelForCausalLM, EetqConfig

path = "/path/to/model"
quantization_config = EetqConfig("int8")
model = AutoModelForCausalLM.from_pretrained(path, device_map="auto", quantization_config=quantization_config)
```

양자화된 모델은 `save_pretrained`를 통해 저장할 수 있으며, `from_pretrained`를 통해 다시 사용할 수 있습니다.

```py
quant_path = "/path/to/save/quantized/model"
model.save_pretrained(quant_path)
model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto")
```
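양자화된 모델을 불러온 뒤에는 일반 모델과 동일한 방식으로 텍스트를 생성할 수 있습니다. 아래는 최소한의 스케치이며, 모델 경로와 프롬프트는 예시로 가정한 것입니다.

```py
from transformers import AutoModelForCausalLM, AutoTokenizer

# 두 경로 모두 예시용 가정입니다
path = "/path/to/model"                       # 토크나이저가 들어 있는 원본 모델 경로
quant_path = "/path/to/save/quantized/model"  # 위에서 저장한 양자화 모델 경로

tokenizer = AutoTokenizer.from_pretrained(path)
model = AutoModelForCausalLM.from_pretrained(quant_path, device_map="auto")

inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```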
transformers/docs/source/ko/quantization/eetq.md/0
{ "file_path": "transformers/docs/source/ko/quantization/eetq.md", "repo_id": "transformers", "token_count": 1163 }
47
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 컴퓨터 비전을 위한 지식 증류[[Knowledge-Distillation-for-Computer-Vision]] [[open-in-colab]] 지식 증류(Knowledge distillation)는 더 크고 복잡한 모델(교사)에서 더 작고 간단한 모델(학생)로 지식을 전달하는 기술입니다. 한 모델에서 다른 모델로 지식을 증류하기 위해, 특정 작업(이 경우 이미지 분류)에 대해 학습된 사전 훈련된 교사 모델을 사용하고, 랜덤으로 초기화된 학생 모델을 이미지 분류 작업에 대해 학습합니다. 그다음, 학생 모델이 교사 모델의 출력을 모방하여 두 모델의 출력 차이를 최소화하도록 훈련합니다. 이 기법은 Hinton 등 연구진의 [Distilling the Knowledge in a Neural Network](https://arxiv.org/abs/1503.02531)에서 처음 소개되었습니다. 이 가이드에서는 특정 작업에 맞춘 지식 증류를 수행할 것입니다. 이번에는 [beans dataset](https://huggingface.co/datasets/beans)을 사용할 것입니다. 이 가이드는 [미세 조정된 ViT 모델](https://huggingface.co/merve/vit-mobilenet-beans-224) (교사 모델)을 [MobileNet](https://huggingface.co/google/mobilenet_v2_1.4_224) (학생 모델)으로 증류하는 방법을 🤗 Transformers의 [Trainer API](https://huggingface.co/docs/transformers/en/main_classes/trainer#trainer) 를 사용하여 보여줍니다. 증류와 과정 평가를 위해 필요한 라이브러리를 설치해 봅시다. ```bash pip install transformers datasets accelerate tensorboard evaluate --upgrade ``` 이 예제에서는 `merve/beans-vit-224` 모델을 교사 모델로 사용하고 있습니다. 이 모델은 beans 데이터셋에서 파인 튜닝된 `google/vit-base-patch16-224-in21k` 기반의 이미지 분류 모델입니다. 이 모델을 무작위로 초기화된 MobileNetV2로 증류해볼 것입니다. 이제 데이터셋을 로드하겠습니다. ```python from datasets import load_dataset dataset = load_dataset("beans") ``` 이 경우 두 모델의 이미지 프로세서가 동일한 해상도로 동일한 출력을 반환하기 때문에, 두가지를 모두 사용할 수 있습니다. 데이터셋의 모든 분할마다 전처리를 적용하기 위해 `dataset`의 `map()` 메소드를 사용할 것 입니다. ```python from transformers import AutoImageProcessor teacher_processor = AutoImageProcessor.from_pretrained("merve/beans-vit-224") def process(examples): processed_inputs = teacher_processor(examples["image"]) return processed_inputs processed_datasets = dataset.map(process, batched=True) ``` 학생 모델(무작위로 초기화된 MobileNet)이 교사 모델(파인 튜닝된 비전 트랜스포머)을 모방하도록 할 것 입니다. 이를 위해 먼저 교사와 학생 모델의 로짓 출력값을 구합니다. 그런 다음 각 출력값을 매개변수 `temperature` 값으로 나누는데, 이 매개변수는 각 소프트 타겟의 중요도를 조절하는 역할을 합니다. 매개변수 `lambda` 는 증류 손실의 중요도에 가중치를 줍니다. 이 예제에서는 `temperature=5`와 `lambda=0.5`를 사용할 것입니다. 학생과 교사 간의 발산을 계산하기 위해 Kullback-Leibler Divergence 손실을 사용합니다. 두 데이터 P와 Q가 주어졌을 때, KL Divergence는 Q를 사용하여 P를 표현하는 데 얼만큼의 추가 정보가 필요한지를 말해줍니다. 두 데이터가 동일하다면, KL Divergence는 0이며, Q로 P를 설명하는 데 추가 정보가 필요하지 않음을 의미합니다. 따라서 지식 증류의 맥락에서 KL Divergence는 유용합니다. 
```python from transformers import TrainingArguments, Trainer import torch import torch.nn as nn import torch.nn.functional as F class ImageDistilTrainer(Trainer): def __init__(self, teacher_model=None, student_model=None, temperature=None, lambda_param=None, *args, **kwargs): super().__init__(model=student_model, *args, **kwargs) self.teacher = teacher_model self.student = student_model self.loss_function = nn.KLDivLoss(reduction="batchmean") device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') self.teacher.to(device) self.teacher.eval() self.temperature = temperature self.lambda_param = lambda_param def compute_loss(self, student, inputs, return_outputs=False): student_output = self.student(**inputs) with torch.no_grad(): teacher_output = self.teacher(**inputs) # 교사와 학생의 소프트 타겟(soft targets) 계산 soft_teacher = F.softmax(teacher_output.logits / self.temperature, dim=-1) soft_student = F.log_softmax(student_output.logits / self.temperature, dim=-1) # 손실(loss) 계산 distillation_loss = self.loss_function(soft_student, soft_teacher) * (self.temperature ** 2) # 실제 레이블 손실 계산 student_target_loss = student_output.loss # 최종 손실 계산 loss = (1. - self.lambda_param) * student_target_loss + self.lambda_param * distillation_loss return (loss, student_output) if return_outputs else loss ``` 이제 Hugging Face Hub에 로그인하여 `Trainer`를 통해 Hugging Face Hub에 모델을 푸시할 수 있도록 하겠습니다. ```python from huggingface_hub import notebook_login notebook_login() ``` 이제 `TrainingArguments`, 교사 모델과 학생 모델을 설정하겠습니다. ```python from transformers import AutoModelForImageClassification, MobileNetV2Config, MobileNetV2ForImageClassification training_args = TrainingArguments( output_dir="my-awesome-model", num_train_epochs=30, fp16=True, logging_dir=f"{repo_name}/logs", logging_strategy="epoch", eval_strategy="epoch", save_strategy="epoch", load_best_model_at_end=True, metric_for_best_model="accuracy", report_to="tensorboard", push_to_hub=True, hub_strategy="every_save", hub_model_id=repo_name, ) num_labels = len(processed_datasets["train"].features["labels"].names) # 모델 초기화 teacher_model = AutoModelForImageClassification.from_pretrained( "merve/beans-vit-224", num_labels=num_labels, ignore_mismatched_sizes=True ) # MobileNetV2 밑바닥부터 학습 student_config = MobileNetV2Config() student_config.num_labels = num_labels student_model = MobileNetV2ForImageClassification(student_config) ``` `compute_metrics` 함수를 사용하여 테스트 세트에서 모델을 평가할 수 있습니다. 이 함수는 훈련 과정에서 모델의 `accuracy`와 `f1`을 계산하는 데 사용됩니다. ```python import evaluate import numpy as np accuracy = evaluate.load("accuracy") def compute_metrics(eval_pred): predictions, labels = eval_pred acc = accuracy.compute(references=labels, predictions=np.argmax(predictions, axis=1)) return {"accuracy": acc["accuracy"]} ``` 정의한 훈련 인수로 `Trainer`를 초기화해봅시다. 또한 데이터 콜레이터(data collator)를 초기화하겠습니다. ```python from transformers import DefaultDataCollator data_collator = DefaultDataCollator() trainer = ImageDistilTrainer( student_model=student_model, teacher_model=teacher_model, training_args=training_args, train_dataset=processed_datasets["train"], eval_dataset=processed_datasets["validation"], data_collator=data_collator, tokenizer=teacher_processor, compute_metrics=compute_metrics, temperature=5, lambda_param=0.5 ) ``` 이제 모델을 훈련할 수 있습니다. ```python trainer.train() ``` 모델을 테스트 세트에서 평가할 수 있습니다. ```python trainer.evaluate(processed_datasets["test"]) ``` 테스트 세트에서 모델의 정확도는 72%에 도달했습니다. 증류의 효율성을 검증하기 위해 동일한 하이퍼파라미터로 beans 데이터셋에서 MobileNet을 처음부터 훈련하였고, 테스트 세트에서의 정확도는 63% 였습니다. 
다양한 사전 훈련된 교사 모델, 학생 구조, 증류 매개변수를 시도해보시고 결과를 보고하기를 권장합니다. 증류된 모델의 훈련 로그와 체크포인트는 [이 저장소](https://huggingface.co/merve/vit-mobilenet-beans-224)에서 찾을 수 있으며, 처음부터 훈련된 MobileNetV2는 이 [저장소](https://huggingface.co/merve/resnet-mobilenet-beans-5)에서 찾을 수 있습니다.
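학습이 끝난 증류 모델은 일반적인 이미지 분류 모델과 동일하게 사용할 수 있습니다. 아래는 최소한의 추론 스케치입니다. 위에서 언급한 증류된 체크포인트에 전처리 설정이 포함되어 있다고 가정하며, 이미지 경로도 예시용입니다.

```python
from transformers import pipeline

# 위에서 언급한 증류된 체크포인트로 이미지 분류 파이프라인 생성 (전처리 설정 포함 가정)
classifier = pipeline("image-classification", model="merve/vit-mobilenet-beans-224")

# 이미지 경로(또는 URL)는 예시용 가정입니다
predictions = classifier("path/to/bean_leaf.jpg")
print(predictions)
```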
transformers/docs/source/ko/tasks/knowledge_distillation_for_image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/knowledge_distillation_for_image_classification.md", "repo_id": "transformers", "token_count": 5192 }
48
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 제로샷(zero-shot) 이미지 분류[[zeroshot-image-classification]] [[open-in-colab]] 제로샷(zero-shot) 이미지 분류는 특정 카테고리의 예시가 포함된 데이터를 학습되지 않은 모델을 사용해 이미지 분류를 수행하는 작업입니다. 일반적으로 이미지 분류를 위해서는 레이블이 달린 특정 이미지 데이터로 모델 학습이 필요하며, 이 모델은 특정 이미지의 특징을 레이블에 "매핑"하는 방법을 학습합니다. 새로운 레이블이 있는 분류 작업에 이러한 모델을 사용해야 하는 경우에는, 모델을 "재보정"하기 위해 미세 조정이 필요합니다. 이와 대조적으로, 제로샷 또는 개방형 어휘(open vocabulary) 이미지 분류 모델은 일반적으로 대규모 이미지 데이터와 해당 설명에 대해 학습된 멀티모달(multimodal) 모델입니다. 이러한 모델은 제로샷 이미지 분류를 포함한 많은 다운스트림 작업에 사용할 수 있는 정렬된(aligned) 비전 언어 표현을 학습합니다. 이는 이미지 분류에 대한 보다 유연한 접근 방식으로, 추가 학습 데이터 없이 새로운 레이블이나 학습하지 못한 카테고리에 대해 모델을 일반화할 수 있습니다. 또한, 사용자가 대상 개체에 대한 자유 형식의 텍스트 설명으로 이미지를 검색할 수 있습니다. 이번 가이드에서 배울 내용은 다음과 같습니다: * 제로샷 이미지 분류 파이프라인 만들기 * 직접 제로샷 이미지 분류 모델 추론 실행하기 시작하기 전에 필요한 라이브러리가 모두 설치되어 있는지 확인하세요: ```bash pip install -q transformers ``` ## 제로샷(zero-shot) 이미지 분류 파이프라인[[zeroshot-image-classification-pipeline]] [`pipeline`]을 활용하면 가장 간단하게 제로샷 이미지 분류를 지원하는 모델로 추론해볼 수 있습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads)에서 파이프라인을 인스턴스화합니다. ```python >>> from transformers import pipeline >>> checkpoint = "openai/clip-vit-large-patch14" >>> detector = pipeline(model=checkpoint, task="zero-shot-image-classification") ``` 다음으로, 분류하고 싶은 이미지를 선택하세요. ```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/g8oS8-82DxI/download?ixid=MnwxMjA3fDB8MXx0b3BpY3x8SnBnNktpZGwtSGt8fHx8fDJ8fDE2NzgxMDYwODc&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/owl.jpg" alt="Photo of an owl"/> </div> 이미지와 해당 이미지의 후보 레이블인 `candidate_labels`를 파이프라인으로 전달합니다. 여기서는 이미지를 직접 전달하지만, 컴퓨터에 저장된 이미지의 경로나 url로 전달할 수도 있습니다. `candidate_labels`는 이 예시처럼 간단한 단어일 수도 있고 좀 더 설명적인 단어일 수도 있습니다. ```py >>> predictions = classifier(image, candidate_labels=["fox", "bear", "seagull", "owl"]) >>> predictions [{'score': 0.9996670484542847, 'label': 'owl'}, {'score': 0.000199399160919711, 'label': 'seagull'}, {'score': 7.392891711788252e-05, 'label': 'fox'}, {'score': 5.96074532950297e-05, 'label': 'bear'}] ``` ## 직접 제로샷(zero-shot) 이미지 분류하기[[zeroshot-image-classification-by-hand]] 이제 제로샷 이미지 분류 파이프라인 사용 방법을 살펴보았으니, 실행하는 방법을 살펴보겠습니다. [Hugging Face Hub에 업로드된 체크포인트](https://huggingface.co/models?pipeline_tag=zero-shot-image-classification&sort=downloads)에서 모델과 프로세서를 가져오는 것으로 시작합니다. 여기서는 이전과 동일한 체크포인트를 사용하겠습니다: ```py >>> from transformers import AutoProcessor, AutoModelForZeroShotImageClassification >>> model = AutoModelForZeroShotImageClassification.from_pretrained(checkpoint) >>> processor = AutoProcessor.from_pretrained(checkpoint) ``` 다른 이미지를 사용해 보겠습니다. 
```py >>> from PIL import Image >>> import requests >>> url = "https://unsplash.com/photos/xBRQfR2bqNI/download?ixid=MnwxMjA3fDB8MXxhbGx8fHx8fHx8fHwxNjc4Mzg4ODEx&force=true&w=640" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/car.jpg" alt="Photo of a car"/> </div> 프로세서를 사용해 모델의 입력을 준비합니다. 프로세서는 모델의 입력으로 사용하기 위해 이미지 크기를 변환하고 정규화하는 이미지 프로세서와 텍스트 입력을 처리하는 토크나이저로 구성됩니다. ```py >>> candidate_labels = ["tree", "car", "bike", "cat"] >>> inputs = processor(images=image, text=candidate_labels, return_tensors="pt", padding=True) ``` 모델에 입력을 전달하고, 결과를 후처리합니다: ```py >>> import torch >>> with torch.no_grad(): ... outputs = model(**inputs) >>> logits = outputs.logits_per_image[0] >>> probs = logits.softmax(dim=-1).numpy() >>> scores = probs.tolist() >>> result = [ ... {"score": score, "label": candidate_label} ... for score, candidate_label in sorted(zip(probs, candidate_labels), key=lambda x: -x[0]) ... ] >>> result [{'score': 0.998572, 'label': 'car'}, {'score': 0.0010570387, 'label': 'bike'}, {'score': 0.0003393686, 'label': 'tree'}, {'score': 3.1572064e-05, 'label': 'cat'}] ```
transformers/docs/source/ko/tasks/zero_shot_image_classification.md/0
{ "file_path": "transformers/docs/source/ko/tasks/zero_shot_image_classification.md", "repo_id": "transformers", "token_count": 3889 }
49
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Treinamento distribuído com o 🤗 Accelerate O paralelismo surgiu como uma estratégia para treinar modelos grandes em hardware limitado e aumentar a velocidade de treinamento em várias órdens de magnitude. Na Hugging Face criamos a biblioteca [🤗 Accelerate](https://huggingface.co/docs/accelerate) para ajudar os usuários a treinar modelos 🤗 Transformers com qualquer configuração distribuída, seja em uma máquina com múltiplos GPUs ou em múltiplos GPUs distribuidos entre muitas máquinas. Neste tutorial, você irá aprender como personalizar seu laço de treinamento de PyTorch para poder treinar em ambientes distribuídos. ## Configuração De início, instale o 🤗 Accelerate: ```bash pip install accelerate ``` Logo, devemos importar e criar um objeto [`Accelerator`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator). O `Accelerator` detectará automáticamente a configuração distribuída disponível e inicializará todos os componentes necessários para o treinamento. Não há necessidade portanto de especificar o dispositivo onde deve colocar seu modelo. ```py >>> from accelerate import Accelerator >>> accelerator = Accelerator() ``` ## Preparando a aceleração Passe todos os objetos relevantes ao treinamento para o método [`prepare`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.prepare). Isto inclui os DataLoaders de treino e evaluação, um modelo e um otimizador: ```py >>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( ... train_dataloader, eval_dataloader, model, optimizer ... ) ``` ## Backward Por último, substitua o `loss.backward()` padrão em seu laço de treinamento com o método [`backward`](https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.backward) do 🤗 Accelerate: ```py >>> for epoch in range(num_epochs): ... for batch in train_dataloader: ... outputs = model(**batch) ... loss = outputs.loss ... accelerator.backward(loss) ... optimizer.step() ... lr_scheduler.step() ... optimizer.zero_grad() ... progress_bar.update(1) ``` Como se poder ver no seguinte código, só precisará adicionar quatro linhas de código ao seu laço de treinamento para habilitar o treinamento distribuído! 
```diff + from accelerate import Accelerator from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler + accelerator = Accelerator() model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2) optimizer = AdamW(model.parameters(), lr=3e-5) - device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") - model.to(device) + train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare( + train_dataloader, eval_dataloader, model, optimizer + ) num_epochs = 3 num_training_steps = num_epochs * len(train_dataloader) lr_scheduler = get_scheduler( "linear", optimizer=optimizer, num_warmup_steps=0, num_training_steps=num_training_steps ) progress_bar = tqdm(range(num_training_steps)) model.train() for epoch in range(num_epochs): for batch in train_dataloader: - batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss - loss.backward() + accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) ``` ## Treinamento Quando tiver adicionado as linhas de código relevantes, inicie o treinamento por um script ou notebook como o Colab. ### Treinamento em um Script Se estiver rodando seu treinamento em um Script, execute o seguinte comando para criar e guardar um arquivo de configuração: ```bash accelerate config ``` Comece o treinamento com: ```bash accelerate launch train.py ``` ### Treinamento em um Notebook O 🤗 Accelerate pode rodar em um notebook, por exemplo, se estiver planejando usar as TPUs do Google Colab. Encapsule o código responsável pelo treinamento de uma função e passe-o ao `notebook_launcher`: ```py >>> from accelerate import notebook_launcher >>> notebook_launcher(training_function) ``` Para obter mais informações sobre o 🤗 Accelerate e suas numerosas funções, consulte a [documentación](https://huggingface.co/docs/accelerate/index).
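Para deixar o fluxo mais concreto, abaixo está um esboço mínimo e auto-contido de uma `training_function` que pode ser passada ao `notebook_launcher`. Os dados sintéticos e o modelo linear servem apenas para ilustrar o laço de treinamento; o valor de `num_processes` é um exemplo.

```py
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator, notebook_launcher

def training_function():
    accelerator = Accelerator()

    # Dados sintéticos apenas para ilustrar o fluxo de treinamento
    x = torch.randn(256, 10)
    y = torch.randint(0, 2, (256,))
    train_dataloader = DataLoader(TensorDataset(x, y), batch_size=32, shuffle=True)

    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)

    train_dataloader, model, optimizer = accelerator.prepare(train_dataloader, model, optimizer)

    model.train()
    for epoch in range(3):
        for inputs, labels in train_dataloader:
            logits = model(inputs)
            loss = torch.nn.functional.cross_entropy(logits, labels)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()

# Em um Colab com TPU, use num_processes=8 (valor de exemplo)
notebook_launcher(training_function, num_processes=8)
```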
transformers/docs/source/pt/accelerate.md/0
{ "file_path": "transformers/docs/source/pt/accelerate.md", "repo_id": "transformers", "token_count": 1904 }
50
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 创建自定义架构 [`AutoClass`](model_doc/auto) 自动推断模型架构并下载预训练的配置和权重。一般来说,我们建议使用 `AutoClass` 生成与检查点(checkpoint)无关的代码。希望对特定模型参数有更多控制的用户,可以仅从几个基类创建自定义的 🤗 Transformers 模型。这对于任何有兴趣学习、训练或试验 🤗 Transformers 模型的人可能特别有用。通过本指南,深入了解如何不通过 `AutoClass` 创建自定义模型。了解如何: - 加载并自定义模型配置。 - 创建模型架构。 - 为文本创建慢速和快速分词器。 - 为视觉任务创建图像处理器。 - 为音频任务创建特征提取器。 - 为多模态任务创建处理器。 ## 配置 [配置](main_classes/configuration) 涉及到模型的具体属性。每个模型配置都有不同的属性;例如,所有 NLP 模型都共享 `hidden_size`、`num_attention_heads`、 `num_hidden_layers` 和 `vocab_size` 属性。这些属性用于指定构建模型时的注意力头数量或隐藏层层数。 访问 [`DistilBertConfig`] 以更近一步了解 [DistilBERT](model_doc/distilbert),检查它的属性: ```py >>> from transformers import DistilBertConfig >>> config = DistilBertConfig() >>> print(config) DistilBertConfig { "activation": "gelu", "attention_dropout": 0.1, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` [`DistilBertConfig`] 显示了构建基础 [`DistilBertModel`] 所使用的所有默认属性。所有属性都可以进行自定义,为实验创造了空间。例如,您可以将默认模型自定义为: - 使用 `activation` 参数尝试不同的激活函数。 - 使用 `attention_dropout` 参数为 attention probabilities 使用更高的 dropout ratio。 ```py >>> my_config = DistilBertConfig(activation="relu", attention_dropout=0.4) >>> print(my_config) DistilBertConfig { "activation": "relu", "attention_dropout": 0.4, "dim": 768, "dropout": 0.1, "hidden_dim": 3072, "initializer_range": 0.02, "max_position_embeddings": 512, "model_type": "distilbert", "n_heads": 12, "n_layers": 6, "pad_token_id": 0, "qa_dropout": 0.1, "seq_classif_dropout": 0.2, "sinusoidal_pos_embds": false, "transformers_version": "4.16.2", "vocab_size": 30522 } ``` 预训练模型的属性可以在 [`~PretrainedConfig.from_pretrained`] 函数中进行修改: ```py >>> my_config = DistilBertConfig.from_pretrained("distilbert/distilbert-base-uncased", activation="relu", attention_dropout=0.4) ``` 当你对模型配置满意时,可以使用 [`~PretrainedConfig.save_pretrained`] 来保存配置。你的配置文件将以 JSON 文件的形式存储在指定的保存目录中: ```py >>> my_config.save_pretrained(save_directory="./your_model_save_path") ``` 要重用配置文件,请使用 [`~PretrainedConfig.from_pretrained`] 进行加载: ```py >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") ``` <Tip> 你还可以将配置文件保存为字典,甚至只保存自定义配置属性与默认配置属性之间的差异!有关更多详细信息,请参阅 [配置](main_classes/configuration) 文档。 </Tip> ## 模型 接下来,创建一个[模型](main_classes/models)。模型,也可泛指架构,定义了每一层网络的行为以及进行的操作。配置中的 `num_hidden_layers` 等属性用于定义架构。每个模型都共享基类 [`PreTrainedModel`] 和一些常用方法,例如调整输入嵌入的大小和修剪自注意力头。此外,所有模型都是 [`torch.nn.Module`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html)、[`tf.keras.Model`](https://www.tensorflow.org/api_docs/python/tf/keras/Model) 或 
[`flax.linen.Module`](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) 的子类。这意味着模型与各自框架的用法兼容。 <frameworkcontent> <pt> 将自定义配置属性加载到模型中: ```py >>> from transformers import DistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/config.json") >>> model = DistilBertModel(my_config) ``` 这段代码创建了一个具有随机参数而不是预训练权重的模型。在训练该模型之前,您还无法将该模型用于任何用途。训练是一项昂贵且耗时的过程。通常来说,最好使用预训练模型来更快地获得更好的结果,同时仅使用训练所需资源的一小部分。 使用 [`~PreTrainedModel.from_pretrained`] 创建预训练模型: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 当加载预训练权重时,如果模型是由 🤗 Transformers 提供的,将自动加载默认模型配置。然而,如果你愿意,仍然可以将默认模型配置的某些或者所有属性替换成你自己的配置: ```py >>> model = DistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </pt> <tf> 将自定义配置属性加载到模型中: ```py >>> from transformers import TFDistilBertModel >>> my_config = DistilBertConfig.from_pretrained("./your_model_save_path/my_config.json") >>> tf_model = TFDistilBertModel(my_config) ``` 这段代码创建了一个具有随机参数而不是预训练权重的模型。在训练该模型之前,您还无法将该模型用于任何用途。训练是一项昂贵且耗时的过程。通常来说,最好使用预训练模型来更快地获得更好的结果,同时仅使用训练所需资源的一小部分。 使用 [`~TFPreTrainedModel.from_pretrained`] 创建预训练模型: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased") ``` 当加载预训练权重时,如果模型是由 🤗 Transformers 提供的,将自动加载默认模型配置。然而,如果你愿意,仍然可以将默认模型配置的某些或者所有属性替换成自己的配置: ```py >>> tf_model = TFDistilBertModel.from_pretrained("distilbert/distilbert-base-uncased", config=my_config) ``` </tf> </frameworkcontent> ### 模型头(Model heads) 此时,你已经有了一个输出*隐藏状态*的基础 DistilBERT 模型。隐藏状态作为输入传递到模型头以生成最终输出。🤗 Transformers 为每个任务提供不同的模型头,只要模型支持该任务(即,您不能使用 DistilBERT 来执行像翻译这样的序列到序列任务)。 <frameworkcontent> <pt> 例如,[`DistilBertForSequenceClassification`] 是一个带有序列分类头(sequence classification head)的基础 DistilBERT 模型。序列分类头是池化输出之上的线性层。 ```py >>> from transformers import DistilBertForSequenceClassification >>> model = DistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 通过切换到不同的模型头,可以轻松地将此检查点重复用于其他任务。对于问答任务,你可以使用 [`DistilBertForQuestionAnswering`] 模型头。问答头(question answering head)与序列分类头类似,不同点在于它是隐藏状态输出之上的线性层。 ```py >>> from transformers import DistilBertForQuestionAnswering >>> model = DistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </pt> <tf> 例如,[`TFDistilBertForSequenceClassification`] 是一个带有序列分类头(sequence classification head)的基础 DistilBERT 模型。序列分类头是池化输出之上的线性层。 ```py >>> from transformers import TFDistilBertForSequenceClassification >>> tf_model = TFDistilBertForSequenceClassification.from_pretrained("distilbert/distilbert-base-uncased") ``` 通过切换到不同的模型头,可以轻松地将此检查点重复用于其他任务。对于问答任务,你可以使用 [`TFDistilBertForQuestionAnswering`] 模型头。问答头(question answering head)与序列分类头类似,不同点在于它是隐藏状态输出之上的线性层。 ```py >>> from transformers import TFDistilBertForQuestionAnswering >>> tf_model = TFDistilBertForQuestionAnswering.from_pretrained("distilbert/distilbert-base-uncased") ``` </tf> </frameworkcontent> ## 分词器 在将模型用于文本数据之前,你需要的最后一个基类是 [tokenizer](main_classes/tokenizer),它用于将原始文本转换为张量。🤗 Transformers 支持两种类型的分词器: - [`PreTrainedTokenizer`]:分词器的Python实现 - [`PreTrainedTokenizerFast`]:来自我们基于 Rust 的 [🤗 Tokenizer](https://huggingface.co/docs/tokenizers/python/latest/) 库的分词器。因为其使用了 Rust 实现,这种分词器类型的速度要快得多,尤其是在批量分词(batch tokenization)的时候。快速分词器还提供其他的方法,例如*偏移映射(offset mapping)*,它将标记(token)映射到其原始单词或字符。 这两种分词器都支持常用的方法,如编码和解码、添加新标记以及管理特殊标记。 <Tip warning={true}> 并非每个模型都支持快速分词器。参照这张 [表格](index#supported-frameworks) 查看模型是否支持快速分词器。 </Tip> 如果您训练了自己的分词器,则可以从*词表*文件创建一个分词器: ```py >>> from transformers import DistilBertTokenizer >>> 
my_tokenizer = DistilBertTokenizer(vocab_file="my_vocab_file.txt", do_lower_case=False, padding_side="left") ``` 请务必记住,自定义分词器生成的词表与预训练模型分词器生成的词表是不同的。如果使用预训练模型,则需要使用预训练模型的词表,否则输入将没有意义。 使用 [`DistilBertTokenizer`] 类创建具有预训练模型词表的分词器: ```py >>> from transformers import DistilBertTokenizer >>> slow_tokenizer = DistilBertTokenizer.from_pretrained("distilbert/distilbert-base-uncased") ``` 使用 [`DistilBertTokenizerFast`] 类创建快速分词器: ```py >>> from transformers import DistilBertTokenizerFast >>> fast_tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased") ``` <Tip> 默认情况下,[`AutoTokenizer`] 将尝试加载快速标记生成器。你可以通过在 `from_pretrained` 中设置 `use_fast=False` 以禁用此行为。 </Tip> ## 图像处理器 图像处理器用于处理视觉输入。它继承自 [`~image_processing_utils.ImageProcessingMixin`] 基类。 要使用它,需要创建一个与你使用的模型关联的图像处理器。例如,如果你使用 [ViT](model_doc/vit) 进行图像分类,可以创建一个默认的 [`ViTImageProcessor`]: ```py >>> from transformers import ViTImageProcessor >>> vit_extractor = ViTImageProcessor() >>> print(vit_extractor) ViTImageProcessor { "do_normalize": true, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.5, 0.5, 0.5 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": 2, "size": 224 } ``` <Tip> 如果您不需要进行任何自定义,只需使用 `from_pretrained` 方法加载模型的默认图像处理器参数。 </Tip> 修改任何 [`ViTImageProcessor`] 参数以创建自定义图像处理器: ```py >>> from transformers import ViTImageProcessor >>> my_vit_extractor = ViTImageProcessor(resample="PIL.Image.BOX", do_normalize=False, image_mean=[0.3, 0.3, 0.3]) >>> print(my_vit_extractor) ViTImageProcessor { "do_normalize": false, "do_resize": true, "image_processor_type": "ViTImageProcessor", "image_mean": [ 0.3, 0.3, 0.3 ], "image_std": [ 0.5, 0.5, 0.5 ], "resample": "PIL.Image.BOX", "size": 224 } ``` ## 特征提取器 特征提取器用于处理音频输入。它继承自 [`~feature_extraction_utils.FeatureExtractionMixin`] 基类,亦可继承 [`SequenceFeatureExtractor`] 类来处理音频输入。 要使用它,创建一个与你使用的模型关联的特征提取器。例如,如果你使用 [Wav2Vec2](model_doc/wav2vec2) 进行音频分类,可以创建一个默认的 [`Wav2Vec2FeatureExtractor`]: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor() >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": true, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 16000 } ``` <Tip> 如果您不需要进行任何自定义,只需使用 `from_pretrained` 方法加载模型的默认特征提取器参数。 </Tip> 修改任何 [`Wav2Vec2FeatureExtractor`] 参数以创建自定义特征提取器: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> w2v2_extractor = Wav2Vec2FeatureExtractor(sampling_rate=8000, do_normalize=False) >>> print(w2v2_extractor) Wav2Vec2FeatureExtractor { "do_normalize": false, "feature_extractor_type": "Wav2Vec2FeatureExtractor", "feature_size": 1, "padding_side": "right", "padding_value": 0.0, "return_attention_mask": false, "sampling_rate": 8000 } ``` ## 处理器 对于支持多模式任务的模型,🤗 Transformers 提供了一个处理器类,可以方便地将特征提取器和分词器等处理类包装到单个对象中。例如,让我们使用 [`Wav2Vec2Processor`] 来执行自动语音识别任务 (ASR)。 ASR 将音频转录为文本,因此您将需要一个特征提取器和一个分词器。 创建一个特征提取器来处理音频输入: ```py >>> from transformers import Wav2Vec2FeatureExtractor >>> feature_extractor = Wav2Vec2FeatureExtractor(padding_value=1.0, do_normalize=True) ``` 创建一个分词器来处理文本输入: ```py >>> from transformers import Wav2Vec2CTCTokenizer >>> tokenizer = Wav2Vec2CTCTokenizer(vocab_file="my_vocab_file.txt") ``` 将特征提取器和分词器合并到 [`Wav2Vec2Processor`] 中: ```py >>> from transformers import Wav2Vec2Processor >>> processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer) ``` 通过两个基类 - 配置类和模型类 - 以及一个附加的预处理类(分词器、图像处理器、特征提取器或处理器),你可以创建 
🤗 Transformers 支持的任何模型。 每个基类都是可配置的,允许你使用所需的特定属性。 你可以轻松设置模型进行训练或修改现有的预训练模型进行微调。
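
As a quick end-to-end illustration of the pieces described above, the short sketch below builds a custom configuration, loads the matching model and fast tokenizer from the `distilbert/distilbert-base-uncased` checkpoint, and runs a single forward pass. It is only a minimal example; the dropout value and label count are arbitrary choices for demonstration.

```py
import torch
from transformers import DistilBertConfig, DistilBertForSequenceClassification, DistilBertTokenizerFast

# Custom configuration: keep the pretrained architecture but raise the attention dropout.
config = DistilBertConfig.from_pretrained(
    "distilbert/distilbert-base-uncased", attention_dropout=0.4, num_labels=2
)

tokenizer = DistilBertTokenizerFast.from_pretrained("distilbert/distilbert-base-uncased")
model = DistilBertForSequenceClassification.from_pretrained(
    "distilbert/distilbert-base-uncased", config=config
)

inputs = tokenizer("Custom architectures are easy to assemble.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # (1, 2): one row of classification scores per input sequence
```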
transformers/docs/source/zh/create_a_model.md/0
{ "file_path": "transformers/docs/source/zh/create_a_model.md", "repo_id": "transformers", "token_count": 8520 }
51
<!--Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer.

-->

# Utilities for Tokenizers

This page lists all the utility functions used by the tokenizers, mainly the class [`~tokenization_utils_base.PreTrainedTokenizerBase`] that implements the common methods shared by [`PreTrainedTokenizer`] and [`PreTrainedTokenizerFast`], as well as the mixin [`~tokenization_utils_base.SpecialTokensMixin`].

Most of these are only useful if you are studying the code of the tokenizers in the library.

## PreTrainedTokenizerBase

[[autodoc]] tokenization_utils_base.PreTrainedTokenizerBase
    - __call__
    - all

## SpecialTokensMixin

[[autodoc]] tokenization_utils_base.SpecialTokensMixin

## Enums and namedtuples

[[autodoc]] tokenization_utils_base.TruncationStrategy

[[autodoc]] tokenization_utils_base.CharSpan

[[autodoc]] tokenization_utils_base.TokenSpan
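
Since most users interact with these utilities through the shared `__call__` API documented above, here is a brief, self-contained sketch of that method in action. The checkpoint is used purely as an example; any fast (Rust-backed) tokenizer would behave the same way.

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert/distilbert-base-uncased")

# __call__ handles batching, padding, truncation and tensor conversion in one place.
encoded = tokenizer(
    ["A short sentence.", "A slightly longer second sentence."],
    padding=True,
    truncation=True,
    max_length=16,
    return_offsets_mapping=True,  # offset mapping requires a fast tokenizer
    return_tensors="pt",
)
print(encoded["input_ids"].shape)
print(encoded["offset_mapping"][0][:4])  # CharSpan-style (start, end) pairs into the original text
```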
transformers/docs/source/zh/internal/tokenization_utils.md/0
{ "file_path": "transformers/docs/source/zh/internal/tokenization_utils.md", "repo_id": "transformers", "token_count": 531 }
52
<!--Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # Pipelines pipelines是使用模型进行推理的一种简单方法。这些pipelines是抽象了库中大部分复杂代码的对象,提供了一个专用于多个任务的简单API,包括专名识别、掩码语言建模、情感分析、特征提取和问答等。请参阅[任务摘要](../task_summary)以获取使用示例。 有两种pipelines抽象类需要注意: - [`pipeline`],它是封装所有其他pipelines的最强大的对象。 - 针对特定任务pipelines,适用于[音频](#audio)、[计算机视觉](#computer-vision)、[自然语言处理](#natural-language-processing)和[多模态](#multimodal)任务。 ## pipeline抽象类 *pipeline*抽象类是对所有其他可用pipeline的封装。它可以像任何其他pipeline一样实例化,但进一步提供额外的便利性。 简单调用一个项目: ```python >>> pipe = pipeline("text-classification") >>> pipe("This restaurant is awesome") [{'label': 'POSITIVE', 'score': 0.9998743534088135}] ``` 如果您想使用 [hub](https://huggingface.co) 上的特定模型,可以忽略任务,如果hub上的模型已经定义了该任务: ```python >>> pipe = pipeline(model="FacebookAI/roberta-large-mnli") >>> pipe("This restaurant is awesome") [{'label': 'NEUTRAL', 'score': 0.7313136458396912}] ``` 要在多个项目上调用pipeline,可以使用*列表*调用它。 ```python >>> pipe = pipeline("text-classification") >>> pipe(["This restaurant is awesome", "This restaurant is awful"]) [{'label': 'POSITIVE', 'score': 0.9998743534088135}, {'label': 'NEGATIVE', 'score': 0.9996669292449951}] ``` 为了遍历整个数据集,建议直接使用 `dataset`。这意味着您不需要一次性分配整个数据集,也不需要自己进行批处理。这应该与GPU上的自定义循环一样快。如果不是,请随时提出issue。 ```python import datasets from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset from tqdm.auto import tqdm pipe = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h", device=0) dataset = datasets.load_dataset("superb", name="asr", split="test") # KeyDataset (only *pt*) will simply return the item in the dict returned by the dataset item # as we're not interested in the *target* part of the dataset. For sentence pair use KeyPairDataset for out in tqdm(pipe(KeyDataset(dataset, "file"))): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... ``` 为了方便使用,也可以使用生成器: ```python from transformers import pipeline pipe = pipeline("text-classification") def data(): while True: # This could come from a dataset, a database, a queue or HTTP request # in a server # Caveat: because this is iterative, you cannot use `num_workers > 1` variable # to use multiple threads to preprocess data. You can still have 1 thread that # does the preprocessing while the main runs the big inference yield "This is a test" for out in pipe(data()): print(out) # {"text": "NUMBER TEN FRESH NELLY IS WAITING ON YOU GOOD NIGHT HUSBAND"} # {"text": ....} # .... 
``` [[autodoc]] pipeline ## Pipeline batching 所有pipeline都可以使用批处理。这将在pipeline使用其流处理功能时起作用(即传递列表或 `Dataset` 或 `generator` 时)。 ```python from transformers import pipeline from transformers.pipelines.pt_utils import KeyDataset import datasets dataset = datasets.load_dataset("imdb", name="plain_text", split="unsupervised") pipe = pipeline("text-classification", device=0) for out in pipe(KeyDataset(dataset, "text"), batch_size=8, truncation="only_first"): print(out) # [{'label': 'POSITIVE', 'score': 0.9998743534088135}] # Exactly the same output as before, but the content are passed # as batches to the model ``` <Tip warning={true}> 然而,这并不自动意味着性能提升。它可能是一个10倍的加速或5倍的减速,具体取决于硬件、数据和实际使用的模型。 主要是加速的示例: </Tip> ```python from transformers import pipeline from torch.utils.data import Dataset from tqdm.auto import tqdm pipe = pipeline("text-classification", device=0) class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): return "This is a test" dataset = MyDataset() for batch_size in [1, 8, 64, 256]: print("-" * 30) print(f"Streaming batch_size={batch_size}") for out in tqdm(pipe(dataset, batch_size=batch_size), total=len(dataset)): pass ``` ``` # On GTX 970 ------------------------------ Streaming no batching 100%|██████████████████████████████████████████████████████████████████████| 5000/5000 [00:26<00:00, 187.52it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:04<00:00, 1205.95it/s] ------------------------------ Streaming batch_size=64 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:02<00:00, 2478.24it/s] ------------------------------ Streaming batch_size=256 100%|█████████████████████████████████████████████████████████████████████| 5000/5000 [00:01<00:00, 2554.43it/s] (diminishing returns, saturated the GPU) ``` 主要是减速的示例: ```python class MyDataset(Dataset): def __len__(self): return 5000 def __getitem__(self, i): if i % 64 == 0: n = 100 else: n = 1 return "This is a test" * n ``` 与其他句子相比,这是一个非常长的句子。在这种情况下,**整个**批次将需要400个tokens的长度,因此整个批次将是 [64, 400] 而不是 [64, 4],从而导致较大的减速。更糟糕的是,在更大的批次上,程序会崩溃。 ``` ------------------------------ Streaming no batching 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:05<00:00, 183.69it/s] ------------------------------ Streaming batch_size=8 100%|█████████████████████████████████████████████████████████████████████| 1000/1000 [00:03<00:00, 265.74it/s] ------------------------------ Streaming batch_size=64 100%|██████████████████████████████████████████████████████████████████████| 1000/1000 [00:26<00:00, 37.80it/s] ------------------------------ Streaming batch_size=256 0%| | 0/1000 [00:00<?, ?it/s] Traceback (most recent call last): File "/home/nicolas/src/transformers/test.py", line 42, in <module> for out in tqdm(pipe(dataset, batch_size=256), total=len(dataset)): .... q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) RuntimeError: CUDA out of memory. 
Tried to allocate 376.00 MiB (GPU 0; 3.95 GiB total capacity; 1.72 GiB already allocated; 354.88 MiB free; 2.46 GiB reserved in total by PyTorch) ``` 对于这个问题,没有好的(通用)解决方案,效果可能因您的用例而异。经验法则如下: 对于用户,一个经验法则是: - **使用硬件测量负载性能。测量、测量、再测量。真实的数字是唯一的方法。** - 如果受到延迟的限制(进行推理的实时产品),不要进行批处理。 - 如果使用CPU,不要进行批处理。 - 如果您在GPU上处理的是吞吐量(您希望在大量静态数据上运行模型),则: - 如果对序列长度的大小没有概念("自然"数据),默认情况下不要进行批处理,进行测试并尝试逐渐添加,添加OOM检查以在失败时恢复(如果您不能控制序列长度,它将在某些时候失败)。 - 如果您的序列长度非常规律,那么批处理更有可能非常有趣,进行测试并推动它,直到出现OOM。 - GPU越大,批处理越有可能变得更有趣 - 一旦启用批处理,确保能够很好地处理OOM。 ## Pipeline chunk batching `zero-shot-classification` 和 `question-answering` 在某种意义上稍微特殊,因为单个输入可能会导致模型的多次前向传递。在正常情况下,这将导致 `batch_size` 参数的问题。 为了规避这个问题,这两个pipeline都有点特殊,它们是 `ChunkPipeline` 而不是常规的 `Pipeline`。简而言之: ```python preprocessed = pipe.preprocess(inputs) model_outputs = pipe.forward(preprocessed) outputs = pipe.postprocess(model_outputs) ``` 现在变成: ```python all_model_outputs = [] for preprocessed in pipe.preprocess(inputs): model_outputs = pipe.forward(preprocessed) all_model_outputs.append(model_outputs) outputs = pipe.postprocess(all_model_outputs) ``` 这对您的代码应该是非常直观的,因为pipeline的使用方式是相同的。 这是一个简化的视图,因为Pipeline可以自动处理批次!这意味着您不必担心您的输入实际上会触发多少次前向传递,您可以独立于输入优化 `batch_size`。前面部分的注意事项仍然适用。 ## Pipeline自定义 如果您想要重载特定的pipeline。 请随时为您手头的任务创建一个issue,Pipeline的目标是易于使用并支持大多数情况,因此 `transformers` 可能支持您的用例。 如果您想简单地尝试一下,可以: - 继承您选择的pipeline ```python class MyPipeline(TextClassificationPipeline): def postprocess(): # Your code goes here scores = scores * 100 # And here my_pipeline = MyPipeline(model=model, tokenizer=tokenizer, ...) # or if you use *pipeline* function, then: my_pipeline = pipeline(model="xxxx", pipeline_class=MyPipeline) ``` 这样就可以让您编写所有想要的自定义代码。 ## 实现一个pipeline [实现一个新的pipeline](../add_new_pipeline) ## 音频 可用于音频任务的pipeline包括以下几种。 ### AudioClassificationPipeline [[autodoc]] AudioClassificationPipeline - __call__ - all ### AutomaticSpeechRecognitionPipeline [[autodoc]] AutomaticSpeechRecognitionPipeline - __call__ - all ### TextToAudioPipeline [[autodoc]] TextToAudioPipeline - __call__ - all ### ZeroShotAudioClassificationPipeline [[autodoc]] ZeroShotAudioClassificationPipeline - __call__ - all ## 计算机视觉 可用于计算机视觉任务的pipeline包括以下几种。 ### DepthEstimationPipeline [[autodoc]] DepthEstimationPipeline - __call__ - all ### ImageClassificationPipeline [[autodoc]] ImageClassificationPipeline - __call__ - all ### ImageSegmentationPipeline [[autodoc]] ImageSegmentationPipeline - __call__ - all ### ImageToImagePipeline [[autodoc]] ImageToImagePipeline - __call__ - all ### ObjectDetectionPipeline [[autodoc]] ObjectDetectionPipeline - __call__ - all ### VideoClassificationPipeline [[autodoc]] VideoClassificationPipeline - __call__ - all ### ZeroShotImageClassificationPipeline [[autodoc]] ZeroShotImageClassificationPipeline - __call__ - all ### ZeroShotObjectDetectionPipeline [[autodoc]] ZeroShotObjectDetectionPipeline - __call__ - all ## 自然语言处理 可用于自然语言处理任务的pipeline包括以下几种。 ### FillMaskPipeline [[autodoc]] FillMaskPipeline - __call__ - all ### NerPipeline [[autodoc]] NerPipeline See [`TokenClassificationPipeline`] for all details. 
### QuestionAnsweringPipeline [[autodoc]] QuestionAnsweringPipeline - __call__ - all ### SummarizationPipeline [[autodoc]] SummarizationPipeline - __call__ - all ### TableQuestionAnsweringPipeline [[autodoc]] TableQuestionAnsweringPipeline - __call__ ### TextClassificationPipeline [[autodoc]] TextClassificationPipeline - __call__ - all ### TextGenerationPipeline [[autodoc]] TextGenerationPipeline - __call__ - all ### Text2TextGenerationPipeline [[autodoc]] Text2TextGenerationPipeline - __call__ - all ### TokenClassificationPipeline [[autodoc]] TokenClassificationPipeline - __call__ - all ### TranslationPipeline [[autodoc]] TranslationPipeline - __call__ - all ### ZeroShotClassificationPipeline [[autodoc]] ZeroShotClassificationPipeline - __call__ - all ## 多模态 可用于多模态任务的pipeline包括以下几种。 ### DocumentQuestionAnsweringPipeline [[autodoc]] DocumentQuestionAnsweringPipeline - __call__ - all ### FeatureExtractionPipeline [[autodoc]] FeatureExtractionPipeline - __call__ - all ### ImageFeatureExtractionPipeline [[autodoc]] ImageFeatureExtractionPipeline - __call__ - all ### ImageToTextPipeline [[autodoc]] ImageToTextPipeline - __call__ - all ### ImageTextToTextPipeline [[autodoc]] ImageTextToTextPipeline - __call__ - all ### MaskGenerationPipeline [[autodoc]] MaskGenerationPipeline - __call__ - all ### VisualQuestionAnsweringPipeline [[autodoc]] VisualQuestionAnsweringPipeline - __call__ - all ## Parent class: `Pipeline` [[autodoc]] Pipeline
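
To make the `preprocess` / `_forward` / `postprocess` split discussed in the chunk-batching and customization sections above more tangible, here is a minimal, illustrative sketch of a custom pipeline. It follows the general pattern described in [Implementing a new pipeline](../add_new_pipeline); the class name and behaviour are examples, not part of this API reference.

```py
from transformers import Pipeline


class MyPairClassificationPipeline(Pipeline):
    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to preprocess/forward/postprocess.
        preprocess_kwargs = {}
        if "second_text" in kwargs:
            preprocess_kwargs["second_text"] = kwargs["second_text"]
        return preprocess_kwargs, {}, {}

    def preprocess(self, text, second_text=None):
        # Turn raw text into model-ready tensors.
        return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)

    def _forward(self, model_inputs):
        # A single model forward pass; batching is handled by the base class.
        return self.model(**model_inputs)

    def postprocess(self, model_outputs):
        # Convert logits into a human-readable label.
        best_class = model_outputs["logits"].softmax(-1).argmax(-1).item()
        return {"label": self.model.config.id2label[best_class]}
```

Instantiated with a sequence-classification model and its tokenizer (or via `pipeline(..., pipeline_class=MyPairClassificationPipeline)` as shown earlier), it can then be called like any other pipeline.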
transformers/docs/source/zh/main_classes/pipelines.md/0
{ "file_path": "transformers/docs/source/zh/main_classes/pipelines.md", "repo_id": "transformers", "token_count": 6364 }
53
<!--Copyright 2022 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # 推理pipeline [`pipeline`] 让使用[Hub](https://huggingface.co/models)上的任何模型进行任何语言、计算机视觉、语音以及多模态任务的推理变得非常简单。即使您对特定的模态没有经验,或者不熟悉模型的源码,您仍然可以使用[`pipeline`]进行推理!本教程将教您: - 如何使用[`pipeline`] 进行推理。 - 如何使用特定的`tokenizer`(分词器)或模型。 - 如何使用[`pipeline`] 进行音频、视觉和多模态任务的推理。 <Tip> 请查看[`pipeline`]文档以获取已支持的任务和可用参数的完整列表。 </Tip> ## Pipeline使用 虽然每个任务都有一个关联的[`pipeline`],但使用通用的抽象的[`pipeline`]更加简单,其中包含所有特定任务的`pipelines`。[`pipeline`]会自动加载一个默认模型和一个能够进行任务推理的预处理类。让我们以使用[`pipeline`]进行自动语音识别(ASR)或语音转文本为例。 1. 首先,创建一个[`pipeline`]并指定推理任务: ```py >>> from transformers import pipeline >>> transcriber = pipeline(task="automatic-speech-recognition") ``` 2. 将您的输入传递给[`pipeline`]。对于语音识别,这通常是一个音频输入文件: ```py >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'} ``` 您没有得到您期望的结果?可以在Hub上查看一些[最受欢迎的自动语音识别模型](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending) ,看看是否可以获得更好的转录。 让我们尝试来自 OpenAI 的[Whisper large-v2](https://huggingface.co/openai/whisper-large) 模型。Whisperb比Wav2Vec2晚2年发布,使用接近10倍的数据进行了训练。因此,它在大多数下游基准测试上击败了Wav2Vec2。 它还具有预测标点和大小写的附加优势,而Wav2Vec2则无法实现这些功能。 让我们在这里尝试一下,看看它的表现如何: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2") >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'} ``` 现在这个结果看起来更准确了!要进行深入的Wav2Vec2与Whisper比较,请参阅[音频变换器课程](https://huggingface.co/learn/audio-course/chapter5/asr_models)。 我们鼓励您在 Hub 上查看不同语言的模型,以及专业领域的模型等。您可以在Hub上直接查看并比较模型的结果,以确定是否适合或处理边缘情况是否比其他模型更好。如果您没有找到适用于您的用例的模型,您始终可以[训练](training)自己的模型! 如果您有多个输入,您可以将输入作为列表传递: ```py transcriber( [ "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac", "https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac", ] ) ``` `Pipelines`非常适合用于测试,因为从一个模型切换到另一个模型非常琐碎;但是,还有一些方法可以将它们优化后用于大型工作负载而不仅仅是测试。请查看以下指南,深入探讨如何迭代整个数据集或在Web服务器中使用`Pipelines`: * [在数据集上使用流水线](#using-pipelines-on-a-dataset) * [在Web服务器中使用流水线](./pipeline_webserver) ## 参数 [`pipeline`] 支持许多参数;有些是适用于特定任务的,而有些适用于所有`pipeline`。通常情况下,您可以在任何地方指定对应参数: ```py transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1) out = transcriber(...) # This will use `my_parameter=1`. out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`. out = transcriber(...) # This will go back to using `my_parameter=1`. 
``` 让我们查看其中的三个重要参数: ### 设备 如果您使用 `device=n`,`pipeline`会自动将模型放在指定的设备上。无论您使用PyTorch还是Tensorflow,这都可以工作。 ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0) ``` 如果模型对于单个GPU来说过于庞大,并且您正在使用PyTorch,您可以设置 `device_map="auto"` 以自动确定如何加载和存储模型权重。使用 `device_map` 参数需要安装🤗 [Accelerate](https://huggingface.co/docs/accelerate) 软件包: ```bash pip install --upgrade accelerate ``` 以下代码会自动在各个设备上加载和存储模型权重: ```py transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto") ``` 请注意,如果传递了 `device_map="auto"`,在实例化您的 `pipeline` 时不需要添加 `device=device` 参数,否则可能会遇到一些意外的状况! ### 批量大小 默认情况下,`pipelines`不会进行批量推理,原因在[这里](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching)详细解释。因为批处理不一定更快,实际上在某些情况下可能会更慢。 但如果在您的用例中起作用,您可以使用: ```py transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2) audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)] texts = transcriber(audio_filenames) ``` 以上代码会在提供的4个音频文件上运行`pipeline`,它会将它们以2个一组的批次传递给模型(模型在GPU上,此时批处理更有可能有所帮助),而您无需编写额外的代码。输出应始终与没有批处理时收到的结果相一致。它只是一种帮助您更快地使用`pipeline`的方式。 `pipeline`也可以减轻一些批处理的复杂性,因为对于某些`pipeline`,需要将单个项目(如长音频文件)分成多个部分以供模型处理。`pipeline`为您执行这种[*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching)。 ### 任务特定参数 所有任务都提供了特定于任务的参数,这些参数提供额外的灵活性和选择,以帮助您完成工作。 例如,[`transformers.AutomaticSpeechRecognitionPipeline.__call__`] 方法具有一个 `return_timestamps` 参数,对于字幕视频似乎很有帮助: ```py >>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac") {'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]} ``` 正如您所看到的,模型推断出了文本,还输出了各个句子发音的**时间**。 每个任务都有许多可用的参数,因此请查看每个任务的API参考,以了解您可以进行哪些调整!例如,[`~transformers.AutomaticSpeechRecognitionPipeline`] 具有 `chunk_length_s` 参数,对于处理非常长的音频文件(例如,为整部电影或长达一小时的视频配字幕)非常有帮助,这通常是模型无法单独处理的: ```python >>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30, return_timestamps=True) >>> transcriber("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav") {'text': " Chapter 16. I might have told you of the beginning of this liaison in a few lines, but I wanted you to see every step by which we came. I, too, agree to whatever Marguerite wished, Marguerite to be unable to live apart from me. It was the day after the evening... ``` 如果您找不到一个真正有帮助的参数,欢迎[提出请求](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)! ## 在数据集上使用pipelines `pipelines`也可以对大型数据集进行推理。我们建议使用迭代器来完成这一任务,这是最简单的方法: ```py def data(): for i in range(1000): yield f"My example {i}" pipe = pipeline(model="openai-community/gpt2", device=0) generated_characters = 0 for out in pipe(data()): generated_characters += len(out[0]["generated_text"]) ``` 迭代器 `data()` 会产生每个结果,`pipelines`会自动识别输入为可迭代对象,并在GPU上处理数据的同时开始获取数据(在底层使用[DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader))。这一点非常重要,因为您不必为整个数据集分配内存,可以尽可能快地将数据传送到GPU。 由于批处理可以加速处理,因此在这里尝试调整 `batch_size` 参数可能会很有用。 迭代数据集的最简单方法就是从🤗 [Datasets](https://github.com/huggingface/datasets/) 中加载数据集: ```py # KeyDataset is a util that will just output the item we're interested in. 
from transformers.pipelines.pt_utils import KeyDataset from datasets import load_dataset pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0) dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]") for out in pipe(KeyDataset(dataset, "audio")): print(out) ``` ## 在Web服务器上使用pipelines <Tip> 创建推理引擎是一个复杂的主题,值得有自己的页面。 </Tip> [链接](./pipeline_webserver) ## 视觉流水线 对于视觉任务,使用[`pipeline`] 几乎是相同的。 指定您的任务并将图像传递给分类器。图像可以是链接、本地路径或base64编码的图像。例如,下面显示的是哪种品种的猫? ![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg) ```py >>> from transformers import pipeline >>> vision_classifier = pipeline(model="google/vit-base-patch16-224") >>> preds = vision_classifier( ... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg" ... ) >>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds] >>> preds [{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}] ``` ## 文本流水线 对于NLP任务,使用[`pipeline`] 几乎是相同的。 ```py >>> from transformers import pipeline >>> # This model is a `zero-shot-classification` model. >>> # It will classify text, except you are free to choose any label you might imagine >>> classifier = pipeline(model="facebook/bart-large-mnli") >>> classifier( ... "I have a problem with my iphone that needs to be resolved asap!!", ... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], ... ) {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} ``` ## 多模态流水线 [`pipeline`] 支持多个模态。例如,视觉问题回答(VQA)任务结合了文本和图像。请随意使用您喜欢的任何图像链接和您想要问关于该图像的问题。图像可以是URL或图像的本地路径。 例如,如果您使用这个[invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png): ```py >>> from transformers import pipeline >>> vqa = pipeline(model="impira/layoutlm-document-qa") >>> output = vqa( ... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", ... question="What is the invoice number?", ... 
) >>> output[0]["score"] = round(output[0]["score"], 3) >>> output [{'score': 0.425, 'answer': 'us-001', 'start': 16, 'end': 16}] ``` <Tip> 要运行上面的示例,除了🤗 Transformers之外,您需要安装[`pytesseract`](https://pypi.org/project/pytesseract/)。 ```bash sudo apt install -y tesseract-ocr pip install pytesseract ``` </Tip> ## 在大模型上使用🤗 `accelerate`和`pipeline`: 您可以轻松地使用🤗 `accelerate`在大模型上运行 `pipeline`!首先确保您已经使用 `pip install accelerate` 安装了 `accelerate`。 首先使用 `device_map="auto"` 加载您的模型!我们将在示例中使用 `facebook/opt-1.3b`。 ```py # pip install accelerate import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto") output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` 如果安装 `bitsandbytes` 并添加参数 `load_in_8bit=True`,您还可以传递8位加载的模型。 ```py # pip install accelerate bitsandbytes import torch from transformers import pipeline pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True}) output = pipe("This is a cool example!", do_sample=True, top_p=0.95) ``` 请注意,您可以将`checkpoint `替换为任何支持大模型加载的Hugging Face模型,比如BLOOM!
transformers/docs/source/zh/pipeline_tutorial.md/0
{ "file_path": "transformers/docs/source/zh/pipeline_tutorial.md", "repo_id": "transformers", "token_count": 7471 }
54
<!---
Copyright 2023 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Automatic Speech Recognition - Flax Examples

## Sequence to Sequence

The script [`run_flax_speech_recognition_seq2seq.py`](https://github.com/huggingface/transformers/blob/main/examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py) can be used to fine-tune any [Flax Speech Sequence-to-Sequence Model](https://huggingface.co/docs/transformers/main/en/model_doc/auto#transformers.FlaxAutoModelForSpeechSeq2Seq) for automatic speech recognition on one of the [official speech recognition datasets](https://huggingface.co/datasets?task_ids=task_ids:automatic-speech-recognition) or a custom dataset. This includes models such as OpenAI's Whisper and warm-started Speech-Encoder-Decoder models; an example for the former is included below.

### Whisper Model

We can load all components of the Whisper model directly from the pretrained checkpoint, including the pretrained model weights, feature extractor and tokenizer. We simply have to specify the id of the fine-tuning dataset and the necessary training hyperparameters.

The following example shows how to fine-tune the [Whisper small](https://huggingface.co/openai/whisper-small) checkpoint on the Hindi subset of the [Common Voice 13](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) dataset. Note that before running this script you must accept the dataset's [terms of use](https://huggingface.co/datasets/mozilla-foundation/common_voice_13_0) and register your Hugging Face Hub token on your device by running `huggingface-cli login`.

```bash
python run_flax_speech_recognition_seq2seq.py \
    --model_name_or_path="openai/whisper-small" \
    --dataset_name="mozilla-foundation/common_voice_13_0" \
    --dataset_config_name="hi" \
    --language="hindi" \
    --train_split_name="train+validation" \
    --eval_split_name="test" \
    --output_dir="./whisper-small-hi-flax" \
    --per_device_train_batch_size="16" \
    --per_device_eval_batch_size="16" \
    --num_train_epochs="10" \
    --learning_rate="1e-4" \
    --warmup_steps="500" \
    --logging_steps="25" \
    --generation_max_length="40" \
    --preprocessing_num_workers="32" \
    --dataloader_num_workers="32" \
    --max_duration_in_seconds="30" \
    --text_column_name="sentence" \
    --overwrite_output_dir \
    --do_train \
    --do_eval \
    --predict_with_generate \
    --push_to_hub \
    --use_auth_token
```

On a TPU v4-8, training should take approximately 25 minutes, with a final cross-entropy loss of 0.02 and a word error rate of **34%**. See the checkpoint [sanchit-gandhi/whisper-small-hi-flax](https://huggingface.co/sanchit-gandhi/whisper-small-hi-flax) for an example training run.
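
Once training has finished, you can sanity-check the exported checkpoint with a short transcription run. The snippet below is only an illustrative sketch: it assumes the fine-tuned weights and processor were pushed to the Hub under the repository shown above (swap in your own repo id or local `output_dir`), and it feeds in silence as a stand-in for a real 16 kHz mono recording.

```python
import numpy as np
from transformers import FlaxWhisperForConditionalGeneration, WhisperProcessor

checkpoint = "sanchit-gandhi/whisper-small-hi-flax"  # replace with your own Hub repo or local output_dir
processor = WhisperProcessor.from_pretrained(checkpoint)
model = FlaxWhisperForConditionalGeneration.from_pretrained(checkpoint)

# 30 seconds of silence as a placeholder for real 16 kHz audio.
audio = np.zeros(30 * 16000, dtype=np.float32)
inputs = processor(audio, sampling_rate=16000, return_tensors="np")

# Greedy decoding; .sequences holds the generated token ids.
generated_ids = model.generate(inputs.input_features, max_length=40).sequences
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```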
transformers/examples/flax/speech-recognition/README.md/0
{ "file_path": "transformers/examples/flax/speech-recognition/README.md", "repo_id": "transformers", "token_count": 1039 }
55
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
-->

# Legacy examples

This folder contains examples which are not actively maintained (mostly contributed by the community).

Using these examples together with a recent version of the library usually requires making small (sometimes big) adaptations to get the scripts working.
transformers/examples/legacy/README.md/0
{ "file_path": "transformers/examples/legacy/README.md", "repo_id": "transformers", "token_count": 204 }
56
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fine-tuning the library models for question-answering.""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import transformers from transformers import ( AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, DataCollatorWithPadding, HfArgumentParser, SquadDataset, Trainer, TrainingArguments, ) from transformers import SquadDataTrainingArguments as DataTrainingArguments from transformers.trainer_utils import is_main_process logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."}) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use" " --overwrite_output_dir to overcome." 
) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Prepare Question-Answering task # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets is_language_sensitive = hasattr(model.config, "lang2id") train_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir ) if training_args.do_train else None ) eval_dataset = ( SquadDataset( data_args, tokenizer=tokenizer, mode="dev", is_language_sensitive=is_language_sensitive, cache_dir=model_args.cache_dir, ) if training_args.do_eval else None ) # Data collator data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
transformers/examples/legacy/question-answering/run_squad_trainer.py/0
{ "file_path": "transformers/examples/legacy/question-answering/run_squad_trainer.py", "repo_id": "transformers", "token_count": 2569 }
57
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset BERT_BASE_CASED = "google-bert/bert-base-cased" PEGASUS_XSUM = "google/pegasus-xsum" ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestAll(TestCasePlus): @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) @slow def test_seq2seq_dataset_truncation(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) max_src_len = 4 max_tgt_len = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated src_lang, tgt_lang = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. train_dataset = Seq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=max_src_len, max_target_length=max_tgt_len, # ignored src_lang=src_lang, tgt_lang=tgt_lang, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(batch, dict) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def test_legacy_dataset_truncation(self, tok): tokenizer = AutoTokenizer.from_pretrained(tok) tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES) max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES) trunc_target = 4 train_dataset = LegacySeq2SeqDataset( tokenizer, data_dir=tmp_dir, type_path="train", max_source_length=20, max_target_length=trunc_target, ) dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 20 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def test_pack_dataset(self): tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25") tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) orig_examples = tmp_dir.joinpath("train.source").open().readlines() save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(tokenizer, tmp_dir, 128, save_dir) orig_paths = {x.name for x in tmp_dir.iterdir()} new_paths = {x.name for x in save_dir.iterdir()} packed_examples = save_dir.joinpath("train.source").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(packed_examples) < len(orig_examples) assert len(packed_examples) == 1 assert len(packed_examples[0]) == sum(len(x) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq") def test_dynamic_batch_size(self): if not FAIRSEQ_AVAILABLE: return ds, max_tokens, tokenizer = self._get_dataset(max_len=64) required_batch_size_multiple = 64 batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple) batch_sizes = [len(x) for x in batch_sampler] assert len(set(batch_sizes)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(batch_sizes) == len(ds) # no dropped or added examples data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2) failures = [] num_src_per_batch = [] for batch in data_loader: src_shape = batch["input_ids"].shape bs = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple num_src_tokens = np.product(batch["input_ids"].shape) num_src_per_batch.append(num_src_tokens) if num_src_tokens > (max_tokens * 1.1): failures.append(num_src_tokens) assert 
num_src_per_batch[0] == max(num_src_per_batch) if failures: raise AssertionError(f"too many tokens in {len(failures)} batches") def test_sortish_sampler_reduces_padding(self): ds, _, tokenizer = self._get_dataset(max_len=512) bs = 2 sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False) naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2) sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler) pad = tokenizer.pad_token_id def count_pad_tokens(data_loader, k="input_ids"): return [batch[k].eq(pad).sum().item() for batch in data_loader] assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels")) assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl)) assert len(sortish_dl) == len(naive_dl) def _get_dataset(self, n_obs=1000, max_len=128): if os.getenv("USE_REAL_DATA", False): data_dir = "examples/seq2seq/wmt_en_ro" max_tokens = max_len * 2 * 64 if not Path(data_dir).joinpath("train.len").exists(): save_len_file(MARIAN_TINY, data_dir) else: data_dir = "examples/seq2seq/test_data/wmt_en_ro" max_tokens = max_len * 4 save_len_file(MARIAN_TINY, data_dir) tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY) ds = Seq2SeqDataset( tokenizer, data_dir=data_dir, type_path="train", max_source_length=max_len, max_target_length=max_len, n_obs=n_obs, ) return ds, max_tokens, tokenizer def test_distributed_sortish_sampler_splits_indices_between_procs(self): ds, max_tokens, tokenizer = self._get_dataset() ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False)) ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False)) assert ids1.intersection(ids2) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ], ) def test_dataset_kwargs(self, tok_name): tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False) if tok_name == MBART_TINY: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, src_lang="EN", tgt_lang="FR", ) kwargs = train_dataset.dataset_kwargs assert "src_lang" in kwargs and "tgt_lang" in kwargs else: train_dataset = Seq2SeqDataset( tokenizer, data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()), type_path="train", max_source_length=4, max_target_length=8, ) kwargs = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
transformers/examples/legacy/seq2seq/old_test_datasets.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/old_test_datasets.py", "repo_id": "transformers", "token_count": 5152 }
58
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass, field from typing import Optional from seq2seq_trainer import arg_to_scheduler from transformers import TrainingArguments logger = logging.getLogger(__name__) @dataclass class Seq2SeqTrainingArguments(TrainingArguments): """ Parameters: label_smoothing (:obj:`float`, `optional`, defaults to 0): The label smoothing epsilon to apply (if not zero). sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to SortishSampler or not. It sorts the inputs according to lengths in-order to minimizing the padding size. predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). """ label_smoothing: Optional[float] = field( default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."} ) sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} ) adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"}) encoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."} ) decoder_layerdrop: Optional[float] = field( default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."} ) dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."}) attention_dropout: Optional[float] = field( default=None, metadata={"help": "Attention dropout probability. Goes into model.config."} ) lr_scheduler: Optional[str] = field( default="linear", metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"}, )
transformers/examples/legacy/seq2seq/seq2seq_training_args.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/seq2seq_training_args.py", "repo_id": "transformers", "token_count": 888 }
59
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ A simple launcher script for TPU training Inspired by https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py :: >>> python xla_spawn.py --num_cores=NUM_CORES_YOU_HAVE YOUR_TRAINING_SCRIPT.py (--arg1 --arg2 --arg3 and all other arguments of your training script) """ import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def parse_args(): """ Helper function parsing the command line options @retval ArgumentParser """ parser = ArgumentParser( description=( "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes" ) ) # Optional arguments for the launch helper parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).") # positional parser.add_argument( "training_script", type=str, help=( "The full path to the single TPU training " "program/script to be launched in parallel, " "followed by all the arguments for the " "training script" ), ) # rest from the training program parser.add_argument("training_script_args", nargs=REMAINDER) return parser.parse_args() def main(): args = parse_args() # Import training_script as a module. script_fpath = Path(args.training_script) sys.path.append(str(script_fpath.parent.resolve())) mod_name = script_fpath.stem mod = importlib.import_module(mod_name) # Patch sys.argv sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)] xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores) if __name__ == "__main__": main()
transformers/examples/legacy/seq2seq/xla_spawn.py/0
{ "file_path": "transformers/examples/legacy/seq2seq/xla_spawn.py", "repo_id": "transformers", "token_count": 887 }
60
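`xla_spawn.py` imports the target training script as a module and hands its `_mp_fn` to `xmp.spawn`, so any script launched this way must expose that entry point. A minimal sketch of the expected shape is shown below; the body is a placeholder, not a real training loop.

```python
# Sketch of the contract xla_spawn.py relies on: the launched script must define a
# module-level `_mp_fn(index)`; xmp.spawn calls it once per TPU core/process.
import sys


def main():
    # Placeholder for the actual training logic. Note that xla_spawn.py appends
    # `--tpu_num_cores <N>` to sys.argv before spawning the processes.
    print("argv seen by the training script:", sys.argv[1:])


def _mp_fn(index):
    # `index` is the process ordinal assigned by torch_xla's xmp.spawn.
    main()


if __name__ == "__main__":
    main()
```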
from transformers.models.llama.modeling_llama import LlamaModel


# Check that we can correctly change the prefix (here add the Text part at the end of the name)
class Multimodal1TextModel(LlamaModel):
    pass
transformers/examples/modular-transformers/modular_multimodal1.py/0
{ "file_path": "transformers/examples/modular-transformers/modular_multimodal1.py", "repo_id": "transformers", "token_count": 62 }
61
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# VisionTextDualEncoder and CLIP model training examples

The following example showcases how to train a CLIP-like vision-text dual encoder model using a pre-trained vision and text encoder.

Such a model can be used for natural language image search and potentially zero-shot image classification. The model is inspired by [CLIP](https://openai.com/blog/clip/), introduced by Alec Radford et al. The idea is to train a vision encoder and a text encoder jointly to project the representation of images and their captions into the same embedding space, such that the caption embeddings are located near the embeddings of the images they describe.

### Download COCO dataset (2017)

This example uses the COCO dataset (2017) through a custom dataset script, which requires users to manually download the COCO dataset before training.

```bash
mkdir data
cd data
wget http://images.cocodataset.org/zips/train2017.zip
wget http://images.cocodataset.org/zips/val2017.zip
wget http://images.cocodataset.org/zips/test2017.zip
wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
wget http://images.cocodataset.org/annotations/image_info_test2017.zip
cd ..
```

Having downloaded the COCO dataset manually, you should be able to load it with the `ydshieh/coco_dataset_script` dataset loading script:

```py
import os
import datasets

COCO_DIR = os.path.join(os.getcwd(), "data")
ds = datasets.load_dataset("ydshieh/coco_dataset_script", "2017", data_dir=COCO_DIR)
```

### Create a model from a vision encoder model and a text encoder model

Next, we create a [VisionTextDualEncoderModel](https://huggingface.co/docs/transformers/model_doc/vision-text-dual-encoder#visiontextdualencoder). The `VisionTextDualEncoderModel` class lets you load any vision and text encoder model to create a dual encoder.

Here is an example of how to load the model using pre-trained vision and text models.

```python3
from transformers import (
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
    AutoTokenizer,
    AutoImageProcessor
)

model = VisionTextDualEncoderModel.from_vision_text_pretrained(
    "openai/clip-vit-base-patch32", "FacebookAI/roberta-base"
)

tokenizer = AutoTokenizer.from_pretrained("FacebookAI/roberta-base")
image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
processor = VisionTextDualEncoderProcessor(image_processor, tokenizer)

# save the model and processor
model.save_pretrained("clip-roberta")
processor.save_pretrained("clip-roberta")
```

This loads both the text and vision encoders using pre-trained weights. The projection layers are randomly initialized, except for CLIP's vision model: if you use CLIP to initialize the vision model, the vision projection weights are also loaded from its pre-trained checkpoint.
### Train the model

Finally, we can run the example script to train the model:

```bash
python examples/pytorch/contrastive-image-text/run_clip.py \
    --output_dir ./clip-roberta-finetuned \
    --model_name_or_path ./clip-roberta \
    --data_dir $PWD/data \
    --dataset_name ydshieh/coco_dataset_script \
    --dataset_config_name=2017 \
    --image_column image_path \
    --caption_column caption \
    --remove_unused_columns=False \
    --do_train  --do_eval \
    --per_device_train_batch_size="64" \
    --per_device_eval_batch_size="64" \
    --learning_rate="5e-5" --warmup_steps="0" --weight_decay 0.1 \
    --overwrite_output_dir \
    --push_to_hub
```
transformers/examples/pytorch/contrastive-image-text/README.md/0
{ "file_path": "transformers/examples/pytorch/contrastive-image-text/README.md", "repo_id": "transformers", "token_count": 1268 }
62
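Once trained, the dual encoder can be used directly for image-text similarity. A rough sketch is shown below; it assumes the fine-tuned checkpoint was saved to `./clip-roberta-finetuned` by the training command above (you could also point it at the `./clip-roberta` folder from the save step to try the un-finetuned projections).

```python
# Sketch: score one COCO validation image against two candidate captions.
import requests
import torch
from PIL import Image

from transformers import VisionTextDualEncoderModel, VisionTextDualEncoderProcessor

checkpoint = "./clip-roberta-finetuned"  # assumed output directory of the training run above
model = VisionTextDualEncoderModel.from_pretrained(checkpoint)
processor = VisionTextDualEncoderProcessor.from_pretrained(checkpoint)

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)
texts = ["two cats sleeping on a couch", "a plate of food on a table"]

inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
with torch.no_grad():
    outputs = model(**inputs)

# Higher logit = better image-text match; softmax turns the scores into probabilities.
print(outputs.logits_per_image.softmax(dim=-1))
```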
<!--- Copyright 2020 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> ## Language model training Fine-tuning (or training from scratch) the library models for language modeling on a text dataset for GPT, GPT-2, ALBERT, BERT, DistilBERT, RoBERTa, XLNet... GPT and GPT-2 are trained or fine-tuned using a causal language modeling (CLM) loss while ALBERT, BERT, DistilBERT and RoBERTa are trained or fine-tuned using a masked language modeling (MLM) loss. XLNet uses permutation language modeling (PLM), you can find more information about the differences between those objectives in our [model summary](https://huggingface.co/transformers/model_summary.html). There are two sets of scripts provided. The first set leverages the Trainer API. The second set with `no_trainer` in the suffix uses a custom training loop and leverages the 🤗 Accelerate library . Both sets use the 🤗 Datasets library. You can easily customize them to your needs if you need extra processing on your datasets. **Note:** The old script `run_language_modeling.py` is still available [here](https://github.com/huggingface/transformers/blob/main/examples/legacy/run_language_modeling.py). The following examples, will run on datasets hosted on our [hub](https://huggingface.co/datasets) or with your own text files for training and validation. We give examples of both below. ### GPT-2/GPT and causal language modeling The following example fine-tunes GPT-2 on WikiText-2. We're using the raw WikiText-2 (no tokens were replaced before the tokenization). The loss here is that of causal language modeling. ```bash python run_clm.py \ --model_name_or_path openai-community/gpt2 \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` This takes about half an hour to train on a single K80 GPU and about one minute for the evaluation to run. It reaches a score of ~20 perplexity once fine-tuned on the dataset. To run on your own training and validation files, use the following command: ```bash python run_clm.py \ --model_name_or_path openai-community/gpt2 \ --train_file path_to_train_file \ --validation_file path_to_validation_file \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_clm_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below: ```bash python run_clm_no_trainer.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --model_name_or_path openai-community/gpt2 \ --output_dir /tmp/test-clm ``` ### GPT-2/GPT and causal language modeling with fill-in-the middle objective The following example fine-tunes GPT-2 on WikiText-2 but using the Fill-in-middle training objective. 
FIM objective was proposed in [Efficient Training of Language Models to Fill in the Middle](https://arxiv.org/abs/2207.14255). They showed that autoregressive language models can learn to infill text after applying a straightforward transformation to the dataset, which simply moves a span of text from the middle of a document to its end. We're using the raw WikiText-2 (no tokens were replaced before the tokenization). The loss here is that of causal language modeling. ```bash python run_fim.py \ --model_name_or_path gpt2 \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --fim_rate 0.5 \ --fim_spm_rate 0.2 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` To run on your own training and validation files, use the following command: ```bash python run_fim.py \ --model_name_or_path gpt2 \ --train_file path_to_train_file \ --validation_file path_to_validation_file \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --fim_rate 0.5 \ --fim_spm_rate 0.2 \ --do_train \ --do_eval \ --output_dir /tmp/test-clm ``` This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_fim_no_trainer.py` script. Take a look at the script for a list of supported arguments. An example is shown below: ```bash python run_fim_no_trainer.py \ --model_name_or_path gpt2 \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --model_name_or_path gpt2 \ --fim_rate 0.5 \ --fim_spm_rate 0.2 \ --output_dir /tmp/test-clm ``` **Note**: Passing in FIM rate as `0.5` means that FIM transformations will be applied to the dataset with a probability of 50%. Whereas passing in FIM SPM rate as `0.2` means that 20% of FIM transformations will use SPM (or Suffix-Prefix-Middle) and the remaining 80% will use PSM (or Prefix-Suffix-Middle) mode of transformation. ### RoBERTa/BERT/DistilBERT and masked language modeling The following example fine-tunes RoBERTa on WikiText-2. Here too, we're using the raw WikiText-2. The loss is different as BERT/RoBERTa have a bidirectional mechanism; we're therefore using the same loss that was used during their pre-training: masked language modeling. In accordance to the RoBERTa paper, we use dynamic masking rather than static masking. The model may, therefore, converge slightly slower (over-fitting takes more epochs). ```bash python run_mlm.py \ --model_name_or_path FacebookAI/roberta-base \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-mlm ``` To run on your own training and validation files, use the following command: ```bash python run_mlm.py \ --model_name_or_path FacebookAI/roberta-base \ --train_file path_to_train_file \ --validation_file path_to_validation_file \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-mlm ``` If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script concatenates all texts and then splits them in blocks of the same length). This uses the built in HuggingFace `Trainer` for training. If you want to use a custom training loop, you can utilize or adapt the `run_mlm_no_trainer.py` script. Take a look at the script for a list of supported arguments. 
An example is shown below: ```bash python run_mlm_no_trainer.py \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --model_name_or_path FacebookAI/roberta-base \ --output_dir /tmp/test-mlm ``` **Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make sure all your batches have the same length. ### Whole word masking This part was moved to `examples/research_projects/mlm_wwm`. ### XLNet and permutation language modeling XLNet uses a different training objective, which is permutation language modeling. It is an autoregressive method to learn bidirectional contexts by maximizing the expected likelihood over all permutations of the input sequence factorization order. We use the `--plm_probability` flag to define the ratio of length of a span of masked tokens to surrounding context length for permutation language modeling. The `--max_span_length` flag may also be used to limit the length of a span of masked tokens used for permutation language modeling. Here is how to fine-tune XLNet on wikitext-2: ```bash python run_plm.py \ --model_name_or_path=xlnet/xlnet-base-cased \ --dataset_name wikitext \ --dataset_config_name wikitext-2-raw-v1 \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-plm ``` To fine-tune it on your own training and validation file, run: ```bash python run_plm.py \ --model_name_or_path=xlnet/xlnet-base-cased \ --train_file path_to_train_file \ --validation_file path_to_validation_file \ --per_device_train_batch_size 8 \ --per_device_eval_batch_size 8 \ --do_train \ --do_eval \ --output_dir /tmp/test-plm ``` If your dataset is organized with one sample per line, you can use the `--line_by_line` flag (otherwise the script concatenates all texts and then splits them in blocks of the same length). **Note:** On TPU, you should use the flag `--pad_to_max_length` in conjunction with the `--line_by_line` flag to make sure all your batches have the same length. ## Streaming To use the streaming dataset mode which can be very useful for large datasets, add `--streaming` to the command line. This is supported by `run_mlm.py`, `run_clm.py` and `run_fim.py`. Make sure to adapt the other scripts to your use case by taking inspiration from them. ## Low Cpu Memory Usage To use low cpu memory mode which can be very useful for LLM, add `--low_cpu_mem_usage` to the command line. This is currently supported by `run_clm.py`,`run_mlm.py`, `run_plm.py`, `run_fim.py`, `run_mlm_no_trainer.py`, `run_clm_no_trainer.py` and `run_fim_no_trainer.py`. ## Creating a model on the fly When training a model from scratch, configuration values may be overridden with the help of `--config_overrides`: ```bash python run_clm.py --model_type gpt2 --tokenizer_name openai-community/gpt2 \ --config_overrides="n_embd=1024,n_head=16,n_layer=48,n_positions=102" \ [...] ``` This feature is only available in `run_clm.py`, `run_plm.py`, `run_mlm.py` and `run_fim.py`.
transformers/examples/pytorch/language-modeling/README.md/0
{ "file_path": "transformers/examples/pytorch/language-modeling/README.md", "repo_id": "transformers", "token_count": 3477 }
63
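To make the fill-in-the-middle transformation described in the README above concrete, here is a toy sketch of the PSM (Prefix-Suffix-Middle) variant: a random middle span is cut out and appended at the end of the document, delimited by sentinel strings. The sentinel names below are placeholders for illustration only, not the exact special tokens the FIM scripts register.

```python
# Toy PSM transformation: move a random middle span to the end of the document.
import random

PREFIX_TOK, MIDDLE_TOK, SUFFIX_TOK = "<PRE>", "<MID>", "<SUF>"  # placeholder sentinels


def fim_psm_transform(doc: str) -> str:
    lo, hi = sorted(random.sample(range(1, len(doc)), 2))
    prefix, middle, suffix = doc[:lo], doc[lo:hi], doc[hi:]
    # The model is then trained with the ordinary causal LM loss on this rearranged text,
    # so at inference time it can generate `middle` given `prefix` and `suffix`.
    return f"{PREFIX_TOK}{prefix}{SUFFIX_TOK}{suffix}{MIDDLE_TOK}{middle}"


random.seed(0)
print(fim_psm_transform("The quick brown fox jumps over the lazy dog."))
```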
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and """Finetuning any 🤗 Transformers model supported by AutoModelForObjectDetection for object detection leveraging the Trainer API.""" import logging import os import sys from dataclasses import dataclass, field from functools import partial from typing import Any, List, Mapping, Optional, Tuple, Union import albumentations as A import numpy as np import torch from datasets import load_dataset from torchmetrics.detection.mean_ap import MeanAveragePrecision import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForObjectDetection, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.image_processing_utils import BatchFeature from transformers.image_transforms import center_to_corners_format from transformers.trainer import EvalPrediction from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/object-detection/requirements.txt") @dataclass class ModelOutput: logits: torch.Tensor pred_boxes: torch.Tensor def format_image_annotations_as_coco( image_id: str, categories: List[int], areas: List[float], bboxes: List[Tuple[float]] ) -> dict: """Format one set of image annotations to the COCO format Args: image_id (str): image id. e.g. "0001" categories (List[int]): list of categories/class labels corresponding to provided bounding boxes areas (List[float]): list of corresponding areas to provided bounding boxes bboxes (List[Tuple[float]]): list of bounding boxes provided in COCO format ([center_x, center_y, width, height] in absolute coordinates) Returns: dict: { "image_id": image id, "annotations": list of formatted annotations } """ annotations = [] for category, area, bbox in zip(categories, areas, bboxes): formatted_annotation = { "image_id": image_id, "category_id": category, "iscrowd": 0, "area": area, "bbox": list(bbox), } annotations.append(formatted_annotation) return { "image_id": image_id, "annotations": annotations, } def convert_bbox_yolo_to_pascal(boxes: torch.Tensor, image_size: Tuple[int, int]) -> torch.Tensor: """ Convert bounding boxes from YOLO format (x_center, y_center, width, height) in range [0, 1] to Pascal VOC format (x_min, y_min, x_max, y_max) in absolute coordinates. 
Args: boxes (torch.Tensor): Bounding boxes in YOLO format image_size (Tuple[int, int]): Image size in format (height, width) Returns: torch.Tensor: Bounding boxes in Pascal VOC format (x_min, y_min, x_max, y_max) """ # convert center to corners format boxes = center_to_corners_format(boxes) # convert to absolute coordinates height, width = image_size boxes = boxes * torch.tensor([[width, height, width, height]]) return boxes def augment_and_transform_batch( examples: Mapping[str, Any], transform: A.Compose, image_processor: AutoImageProcessor, return_pixel_mask: bool = False, ) -> BatchFeature: """Apply augmentations and format annotations in COCO format for object detection task""" images = [] annotations = [] for image_id, image, objects in zip(examples["image_id"], examples["image"], examples["objects"]): image = np.array(image.convert("RGB")) # apply augmentations output = transform(image=image, bboxes=objects["bbox"], category=objects["category"]) images.append(output["image"]) # format annotations in COCO format formatted_annotations = format_image_annotations_as_coco( image_id, output["category"], objects["area"], output["bboxes"] ) annotations.append(formatted_annotations) # Apply the image processor transformations: resizing, rescaling, normalization result = image_processor(images=images, annotations=annotations, return_tensors="pt") if not return_pixel_mask: result.pop("pixel_mask", None) return result def collate_fn(batch: List[BatchFeature]) -> Mapping[str, Union[torch.Tensor, List[Any]]]: data = {} data["pixel_values"] = torch.stack([x["pixel_values"] for x in batch]) data["labels"] = [x["labels"] for x in batch] if "pixel_mask" in batch[0]: data["pixel_mask"] = torch.stack([x["pixel_mask"] for x in batch]) return data @torch.no_grad() def compute_metrics( evaluation_results: EvalPrediction, image_processor: AutoImageProcessor, threshold: float = 0.0, id2label: Optional[Mapping[int, str]] = None, ) -> Mapping[str, float]: """ Compute mean average mAP, mAR and their variants for the object detection task. Args: evaluation_results (EvalPrediction): Predictions and targets from evaluation. threshold (float, optional): Threshold to filter predicted boxes by confidence. Defaults to 0.0. id2label (Optional[dict], optional): Mapping from class id to class name. Defaults to None. 
Returns: Mapping[str, float]: Metrics in a form of dictionary {<metric_name>: <metric_value>} """ predictions, targets = evaluation_results.predictions, evaluation_results.label_ids # For metric computation we need to provide: # - targets in a form of list of dictionaries with keys "boxes", "labels" # - predictions in a form of list of dictionaries with keys "boxes", "scores", "labels" image_sizes = [] post_processed_targets = [] post_processed_predictions = [] # Collect targets in the required format for metric computation for batch in targets: # collect image sizes, we will need them for predictions post processing batch_image_sizes = torch.tensor([x["orig_size"] for x in batch]) image_sizes.append(batch_image_sizes) # collect targets in the required format for metric computation # boxes were converted to YOLO format needed for model training # here we will convert them to Pascal VOC format (x_min, y_min, x_max, y_max) for image_target in batch: boxes = torch.tensor(image_target["boxes"]) boxes = convert_bbox_yolo_to_pascal(boxes, image_target["orig_size"]) labels = torch.tensor(image_target["class_labels"]) post_processed_targets.append({"boxes": boxes, "labels": labels}) # Collect predictions in the required format for metric computation, # model produce boxes in YOLO format, then image_processor convert them to Pascal VOC format for batch, target_sizes in zip(predictions, image_sizes): batch_logits, batch_boxes = batch[1], batch[2] output = ModelOutput(logits=torch.tensor(batch_logits), pred_boxes=torch.tensor(batch_boxes)) post_processed_output = image_processor.post_process_object_detection( output, threshold=threshold, target_sizes=target_sizes ) post_processed_predictions.extend(post_processed_output) # Compute metrics metric = MeanAveragePrecision(box_format="xyxy", class_metrics=True) metric.update(post_processed_predictions, post_processed_targets) metrics = metric.compute() # Replace list of per class metrics with separate metric for each class classes = metrics.pop("classes") map_per_class = metrics.pop("map_per_class") mar_100_per_class = metrics.pop("mar_100_per_class") for class_id, class_map, class_mar in zip(classes, map_per_class, mar_100_per_class): class_name = id2label[class_id.item()] if id2label is not None else class_id.item() metrics[f"map_{class_name}"] = class_map metrics[f"mar_100_{class_name}"] = class_mar metrics = {k: round(v.item(), 4) for k, v in metrics.items()} return metrics @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: str = field( default="cppe-5", metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) image_square_size: Optional[int] = field( default=600, metadata={"help": "Image longest size will be resized to this value, then image will be padded to square."}, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." 
) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) use_fast: Optional[bool] = field( default=True, metadata={"help": "Use a fast torchvision-base image processor if it is supported for a given model."}, ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( default="facebook/detr-resnet-50", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) ignore_mismatched_sizes: bool = field( default=False, metadata={ "help": "Whether or not to raise an error if some of the weights from the checkpoint do not have the same size as the weights of the model (if for instance, you are instantiating a model with 10 labels from a checkpoint with 3 labels)." }, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." ) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_object_detection", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, " + f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif os.path.isdir(training_args.output_dir) and not training_args.overwrite_output_dir: checkpoint = get_last_checkpoint(training_args.output_dir) if checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # ------------------------------------------------------------------------------------------------ # Load dataset, prepare splits # ------------------------------------------------------------------------------------------------ dataset = load_dataset( data_args.dataset_name, cache_dir=model_args.cache_dir, trust_remote_code=model_args.trust_remote_code ) # If we don't have a validation split, split off a percentage of train as validation data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split, seed=training_args.seed) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Get dataset categories and prepare mappings for label_name <-> label_id categories = dataset["train"].features["objects"].feature["category"].names id2label = dict(enumerate(categories)) label2id = {v: k for k, v in id2label.items()} # ------------------------------------------------------------------------------------------------ # Load pretrained config, model and image processor # ------------------------------------------------------------------------------------------------ common_pretrained_args = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "token": model_args.token, "trust_remote_code": model_args.trust_remote_code, } config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, **common_pretrained_args, ) model = AutoModelForObjectDetection.from_pretrained( model_args.model_name_or_path, config=config, ignore_mismatched_sizes=model_args.ignore_mismatched_sizes, **common_pretrained_args, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, do_resize=True, size={"max_height": data_args.image_square_size, "max_width": data_args.image_square_size}, do_pad=True, pad_size={"height": data_args.image_square_size, "width": data_args.image_square_size}, 
use_fast=data_args.use_fast, **common_pretrained_args, ) # ------------------------------------------------------------------------------------------------ # Define image augmentations and dataset transforms # ------------------------------------------------------------------------------------------------ max_size = data_args.image_square_size train_augment_and_transform = A.Compose( [ A.Compose( [ A.SmallestMaxSize(max_size=max_size, p=1.0), A.RandomSizedBBoxSafeCrop(height=max_size, width=max_size, p=1.0), ], p=0.2, ), A.OneOf( [ A.Blur(blur_limit=7, p=0.5), A.MotionBlur(blur_limit=7, p=0.5), A.Defocus(radius=(1, 5), alias_blur=(0.1, 0.25), p=0.1), ], p=0.1, ), A.Perspective(p=0.1), A.HorizontalFlip(p=0.5), A.RandomBrightnessContrast(p=0.5), A.HueSaturationValue(p=0.1), ], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True, min_area=25), ) validation_transform = A.Compose( [A.NoOp()], bbox_params=A.BboxParams(format="coco", label_fields=["category"], clip=True), ) # Make transform functions for batch and apply for dataset splits train_transform_batch = partial( augment_and_transform_batch, transform=train_augment_and_transform, image_processor=image_processor ) validation_transform_batch = partial( augment_and_transform_batch, transform=validation_transform, image_processor=image_processor ) dataset["train"] = dataset["train"].with_transform(train_transform_batch) dataset["validation"] = dataset["validation"].with_transform(validation_transform_batch) dataset["test"] = dataset["test"].with_transform(validation_transform_batch) # ------------------------------------------------------------------------------------------------ # Model training and evaluation with Trainer API # ------------------------------------------------------------------------------------------------ eval_compute_metrics_fn = partial( compute_metrics, image_processor=image_processor, id2label=id2label, threshold=0.0 ) trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, processing_class=image_processor, data_collator=collate_fn, compute_metrics=eval_compute_metrics_fn, ) # Training if training_args.do_train: train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Final evaluation if training_args.do_eval: metrics = trainer.evaluate(eval_dataset=dataset["test"], metric_key_prefix="test") trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Write model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["object-detection", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
transformers/examples/pytorch/object-detection/run_object_detection.py/0
{ "file_path": "transformers/examples/pytorch/object-detection/run_object_detection.py", "repo_id": "transformers", "token_count": 8023 }
64
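The metric computation in the script above hinges on converting boxes from the normalized YOLO format used during training to absolute Pascal VOC corners. The small self-contained check below mirrors the `convert_bbox_yolo_to_pascal` helper defined in the script; the box and image size are made-up values for illustration.

```python
# Toy check of the YOLO -> Pascal VOC conversion used in compute_metrics above.
import torch

from transformers.image_transforms import center_to_corners_format

boxes_yolo = torch.tensor([[0.5, 0.5, 0.2, 0.4]])  # (center_x, center_y, width, height), normalized to [0, 1]
height, width = 480, 640                           # hypothetical original image size

boxes = center_to_corners_format(boxes_yolo)                    # (x_min, y_min, x_max, y_max), still in [0, 1]
boxes = boxes * torch.tensor([[width, height, width, height]])  # scale to absolute pixel coordinates

print(boxes)  # tensor([[256., 144., 384., 336.]])
```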
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation.""" import argparse import json import math import os import warnings from functools import partial from pathlib import Path import albumentations as A import datasets import evaluate import numpy as np import torch from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from albumentations.pytorch import ToTensorV2 from datasets import load_dataset from huggingface_hub import HfApi, hf_hub_download from torch.utils.data import DataLoader from tqdm.auto import tqdm import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForSemanticSegmentation, SchedulerType, default_data_collator, get_scheduler, ) from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") logger = get_logger(__name__) require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") def reduce_labels_transform(labels: np.ndarray, **kwargs) -> np.ndarray: """Set `0` label as with value 255 and then reduce all other labels by 1. Example: Initial class labels: 0 - background; 1 - road; 2 - car; Transformed class labels: 255 - background; 0 - road; 1 - car; **kwargs are required to use this function with albumentations. 
""" labels[labels == 0] = 255 labels = labels - 1 labels[labels == 254] = 255 return labels def parse_args(): parser = argparse.ArgumentParser(description="Finetune a transformers model on a image semantic segmentation task") parser.add_argument( "--model_name_or_path", type=str, help="Path to a pretrained model or model identifier from huggingface.co/models.", default="nvidia/mit-b0", ) parser.add_argument( "--dataset_name", type=str, help="Name of the dataset on the hub.", default="segments/sidewalk-semantic", ) parser.add_argument( "--do_reduce_labels", action="store_true", help="Whether or not to reduce all labels by 1 and replace background by 255.", ) parser.add_argument( "--reduce_labels", action="store_true", help="Whether or not to reduce all labels by 1 and replace background by 255.", ) parser.add_argument( "--train_val_split", type=float, default=0.15, help="Fraction of the dataset to be used for validation.", ) parser.add_argument( "--cache_dir", type=str, help="Path to a folder in which the model and dataset will be cached.", ) parser.add_argument( "--use_auth_token", action="store_true", help="Whether to use an authentication token to access the model repository.", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument( "--adam_beta1", type=float, default=0.9, help="Beta1 for AdamW optimizer", ) parser.add_argument( "--adam_beta2", type=float, default=0.999, help="Beta2 for AdamW optimizer", ) parser.add_argument( "--adam_epsilon", type=float, default=1e-8, help="Epsilon for AdamW optimizer", ) parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="polynomial", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--trust_remote_code", action="store_true", help=( "Whether to trust the execution of code from datasets/models defined on the Hub." 
" This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ), ) parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", required=False, action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. ' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args() # Sanity checks if args.push_to_hub or args.with_tracking: if args.output_dir is None: raise ValueError( "Need an `output_dir` to create a repo when `--push_to_hub` or `with_tracking` is specified." ) # Deprecation if args.reduce_labels: args.do_reduce_labels = args.reduce_labels warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.45. Please use `do_reduce_labels` instead.", FutureWarning, ) if args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) return args def main(): args = parse_args() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_semantic_segmentation_no_trainer", args) # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. # If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers # in the environment accelerator_log_kwargs = {} if args.with_tracking: accelerator_log_kwargs["log_with"] = args.report_to accelerator_log_kwargs["project_dir"] = args.output_dir accelerator = Accelerator(gradient_accumulation_steps=args.gradient_accumulation_steps, **accelerator_log_kwargs) logger.info(accelerator.state, main_process_only=False) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. # We set device_specific to True as we want different data augmentation per device. 
if args.seed is not None: set_seed(args.seed, device_specific=True) # Handle the repository creation if accelerator.is_main_process: if args.push_to_hub: # Retrieve of infer repo_name repo_name = args.hub_model_id if repo_name is None: repo_name = Path(args.output_dir).absolute().name # Create repo and retrieve repo_id api = HfApi() repo_id = api.create_repo(repo_name, exist_ok=True, token=args.hub_token).repo_id with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore: if "step_*" not in gitignore: gitignore.write("step_*\n") if "epoch_*" not in gitignore: gitignore.write("epoch_*\n") elif args.output_dir is not None: os.makedirs(args.output_dir, exist_ok=True) accelerator.wait_for_everyone() # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(args.dataset_name, cache_dir=args.cache_dir, trust_remote_code=args.trust_remote_code) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. args.train_val_split = None if "validation" in dataset.keys() else args.train_val_split if isinstance(args.train_val_split, float) and args.train_val_split > 0.0: split = dataset["train"].train_test_split(args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if args.dataset_name == "scene_parse_150": repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = args.dataset_name filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} # Load pretrained model and image processor config = AutoConfig.from_pretrained( args.model_name_or_path, id2label=id2label, label2id=label2id, trust_remote_code=args.trust_remote_code ) image_processor = AutoImageProcessor.from_pretrained( args.model_name_or_path, trust_remote_code=args.trust_remote_code ) model = AutoModelForSemanticSegmentation.from_pretrained( args.model_name_or_path, config=config, trust_remote_code=args.trust_remote_code, do_reduce_labels=args.do_reduce_labels, ) # Define transforms to be applied to each image and target. if "shortest_edge" in image_processor.size: # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. 
height, width = image_processor.size["shortest_edge"], image_processor.size["shortest_edge"] else: height, width = image_processor.size["height"], image_processor.size["width"] train_transforms = A.Compose( [ A.Lambda(name="reduce_labels", mask=reduce_labels_transform if args.do_reduce_labels else None, p=1.0), # pad image with 255, because it is ignored by loss A.PadIfNeeded(min_height=height, min_width=width, border_mode=0, value=255, p=1.0), A.RandomCrop(height=height, width=width, p=1.0), A.HorizontalFlip(p=0.5), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) val_transforms = A.Compose( [ A.Lambda(name="reduce_labels", mask=reduce_labels_transform if args.do_reduce_labels else None, p=1.0), A.Resize(height=height, width=width, p=1.0), A.Normalize(mean=image_processor.image_mean, std=image_processor.image_std, max_pixel_value=255.0, p=1.0), ToTensorV2(), ] ) def preprocess_batch(example_batch, transforms: A.Compose): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): transformed = transforms(image=np.array(image.convert("RGB")), mask=np.array(target)) pixel_values.append(transformed["image"]) labels.append(transformed["mask"]) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values).to(torch.float) encoding["labels"] = torch.stack(labels).to(torch.long) return encoding # Preprocess function for dataset should have only one input argument, # so we use partial to pass transforms preprocess_train_batch_fn = partial(preprocess_batch, transforms=train_transforms) preprocess_val_batch_fn = partial(preprocess_batch, transforms=val_transforms) with accelerator.main_process_first(): train_dataset = dataset["train"].with_transform(preprocess_train_batch_fn) eval_dataset = dataset["validation"].with_transform(preprocess_val_batch_fn) train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=args.per_device_train_batch_size ) eval_dataloader = DataLoader( eval_dataset, collate_fn=default_data_collator, batch_size=args.per_device_eval_batch_size ) # Optimizer optimizer = torch.optim.AdamW( list(model.parameters()), lr=args.learning_rate, betas=[args.adam_beta1, args.adam_beta2], eps=args.adam_epsilon, ) # Figure out how many steps we should save the Accelerator states checkpointing_steps = args.checkpointing_steps if checkpointing_steps is not None and checkpointing_steps.isdigit(): checkpointing_steps = int(checkpointing_steps) # Scheduler and math around the number of training steps. overrode_max_train_steps = False num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if args.max_train_steps is None: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch overrode_max_train_steps = True lr_scheduler = get_scheduler( name=args.lr_scheduler_type, optimizer=optimizer, num_warmup_steps=args.num_warmup_steps * accelerator.num_processes, num_training_steps=args.max_train_steps if overrode_max_train_steps else args.max_train_steps * accelerator.num_processes, ) # Prepare everything with our `accelerator`. model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader, lr_scheduler ) # We need to recalculate our total training steps as the size of the training dataloader may have changed. 
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps) if overrode_max_train_steps: args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch # Afterwards we recalculate our number of training epochs args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch) # Instantiate metric metric = evaluate.load("mean_iou", cache_dir=args.cache_dir) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if args.with_tracking: experiment_config = vars(args) # TensorBoard cannot log Enums, need the raw value experiment_config["lr_scheduler_type"] = experiment_config["lr_scheduler_type"].value accelerator.init_trackers("semantic_segmentation_no_trainer", experiment_config) # Train! total_batch_size = args.per_device_train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {args.per_device_train_batch_size}") logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}") logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}") logger.info(f" Total optimization steps = {args.max_train_steps}") # Only show the progress bar once on each machine. progress_bar = tqdm(range(args.max_train_steps), disable=not accelerator.is_local_main_process) completed_steps = 0 starting_epoch = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": checkpoint_path = args.resume_from_checkpoint path = os.path.basename(args.resume_from_checkpoint) else: # Get the most recent checkpoint dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()] dirs.sort(key=os.path.getctime) path = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last checkpoint_path = path path = os.path.basename(checkpoint_path) accelerator.print(f"Resumed from checkpoint: {checkpoint_path}") accelerator.load_state(checkpoint_path) # Extract `epoch_{i}` or `step_{i}` training_difference = os.path.splitext(path)[0] if "epoch" in training_difference: starting_epoch = int(training_difference.replace("epoch_", "")) + 1 resume_step = None completed_steps = starting_epoch * num_update_steps_per_epoch else: # need to multiply `gradient_accumulation_steps` to reflect real steps resume_step = int(training_difference.replace("step_", "")) * args.gradient_accumulation_steps starting_epoch = resume_step // len(train_dataloader) completed_steps = resume_step // args.gradient_accumulation_steps resume_step -= starting_epoch * len(train_dataloader) # update the progress_bar if load from checkpoint progress_bar.update(completed_steps) for epoch in range(starting_epoch, args.num_train_epochs): model.train() if args.with_tracking: total_loss = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We skip the first `n` batches in the dataloader when resuming from a checkpoint active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step) else: active_dataloader = train_dataloader for step, batch in enumerate(active_dataloader): with accelerator.accumulate(model): outputs = model(**batch) loss = outputs.loss # We keep 
track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() # Checks if the accelerator has performed an optimization step behind the scenes if accelerator.sync_gradients: progress_bar.update(1) completed_steps += 1 if isinstance(checkpointing_steps, int): if completed_steps % checkpointing_steps == 0 and accelerator.sync_gradients: output_dir = f"step_{completed_steps}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save, ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if completed_steps >= args.max_train_steps: break logger.info("***** Running evaluation *****") model.eval() for step, batch in enumerate(tqdm(eval_dataloader, disable=not accelerator.is_local_main_process)): with torch.no_grad(): outputs = model(**batch) upsampled_logits = torch.nn.functional.interpolate( outputs.logits, size=batch["labels"].shape[-2:], mode="bilinear", align_corners=False ) predictions = upsampled_logits.argmax(dim=1) predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=predictions, references=references, ) eval_metrics = metric.compute( num_labels=len(id2label), ignore_index=255, reduce_labels=False, # we've already reduced the labels before ) logger.info(f"epoch {epoch}: {eval_metrics}") if args.with_tracking: accelerator.log( { "mean_iou": eval_metrics["mean_iou"], "mean_accuracy": eval_metrics["mean_accuracy"], "overall_accuracy": eval_metrics["overall_accuracy"], "train_loss": total_loss.item() / len(train_dataloader), "epoch": epoch, "step": completed_steps, }, step=completed_steps, ) if args.push_to_hub and epoch < args.num_train_epochs - 1: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) api.upload_folder( commit_message=f"Training in progress epoch {epoch}", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) if args.checkpointing_steps == "epoch": output_dir = f"epoch_{epoch}" if args.output_dir is not None: output_dir = os.path.join(args.output_dir, output_dir) accelerator.save_state(output_dir) if args.with_tracking: accelerator.end_training() if args.output_dir is not None: accelerator.wait_for_everyone() unwrapped_model = accelerator.unwrap_model(model) unwrapped_model.save_pretrained( args.output_dir, is_main_process=accelerator.is_main_process, save_function=accelerator.save ) if accelerator.is_main_process: image_processor.save_pretrained(args.output_dir) if args.push_to_hub: api.upload_folder( commit_message="End of training", folder_path=args.output_dir, repo_id=repo_id, repo_type="model", token=args.hub_token, ) all_results = { f"eval_{k}": v.tolist() if isinstance(v, np.ndarray) else v for k, v in 
eval_metrics.items() } with open(os.path.join(args.output_dir, "all_results.json"), "w") as f: json.dump(all_results, f, indent=2) if __name__ == "__main__": main()
transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py/0
{ "file_path": "transformers/examples/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py", "repo_id": "transformers", "token_count": 11268 }
65
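The `reduce_labels_transform` helper in the script above remaps the background class to the ignore index and shifts every other label down by one. A tiny self-contained check on a toy label array makes the convention explicit:

```python
# Toy check of the label-reduction convention: background (0) -> ignore index 255,
# every other class id shifts down by one.
import numpy as np

labels = np.array([0, 1, 2, 3], dtype=np.uint8)

labels[labels == 0] = 255    # send background to the ignore index
labels = labels - 1          # shift the remaining class ids down by one (255 becomes 254)
labels[labels == 254] = 255  # restore the ignore index

print(labels)  # [255   0   1   2]
```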
# Text Summarization with Pretrained Encoders

This folder contains part of the code necessary to reproduce the results on abstractive summarization from the article [Text Summarization with Pretrained Encoders](https://arxiv.org/pdf/1908.08345.pdf) by [Yang Liu](https://nlp-yang.github.io/) and [Mirella Lapata](https://homepages.inf.ed.ac.uk/mlap/). It can also be used to summarize any document.

The original code can be found on Yang Liu's [GitHub repository](https://github.com/nlpyang/PreSumm).

The model is loaded with the pre-trained weights of the abstractive summarization model trained on the CNN/Daily Mail dataset, first on an extractive and then on an abstractive task.

## Setup

```bash
git clone https://github.com/huggingface/transformers && cd transformers
pip install .
pip install nltk py-rouge
cd examples/seq2seq/bertabs
```

## Reproduce the authors' ROUGE score

To reproduce the authors' results on the CNN/Daily Mail dataset, you first need to download both the CNN and Daily Mail datasets [from Kyunghyun Cho's website](https://cs.nyu.edu/~kcho/DMQA/) (the links next to "Stories") into the same folder. Then uncompress the archives by running:

```bash
tar -xvf cnn_stories.tgz && tar -xvf dailymail_stories.tgz
```

Then move all the stories to the same folder. We will refer to the path where you uncompressed both archives as `$DATA_PATH`. Run the following in the same folder as `run_summarization.py`:

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \ # optional
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true \
    --compute_rouge true
```

The script executes on GPU if one is available and if `no_cuda` is not set to `true`. Inference on multiple GPUs is not supported yet. The ROUGE scores will be displayed in the console at the end of evaluation and written to a `rouge_scores.txt` file. The script takes about 30 hours to run on a single Tesla V100 GPU with a batch size of 10 (300,000 texts to summarize).

## Summarize any text

Put the documents that you would like to summarize in a folder (the path to which is referred to as `$DATA_PATH` below) and run the following in the same folder as `run_summarization.py`:

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \ # optional
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true
```

You may want to play around with `min_length`, `max_length` and `alpha` to suit your use case. If you want to compute ROUGE on another dataset, you will need to tweak the stories/summaries import in `utils_summarization.py` and tell it where to fetch the reference summaries.
transformers/examples/research_projects/bertabs/README.md/0
{ "file_path": "transformers/examples/research_projects/bertabs/README.md", "repo_id": "transformers", "token_count": 909 }
66
#!/bin/bash export CUDA_VISIBLE_DEVICES=0 PATH_TO_DATA=/h/xinji/projects/GLUE MODEL_TYPE=bert # bert or roberta MODEL_SIZE=base # base or large DATASET=MRPC # SST-2, MRPC, RTE, QNLI, QQP, or MNLI MODEL_NAME=${MODEL_TYPE}-${MODEL_SIZE} if [ $MODEL_TYPE = 'bert' ] then MODEL_NAME=${MODEL_NAME}-uncased fi python -u run_glue_deebert.py \ --model_type $MODEL_TYPE \ --model_name_or_path ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ --task_name $DATASET \ --do_eval \ --do_lower_case \ --data_dir $PATH_TO_DATA/$DATASET \ --output_dir ./saved_models/${MODEL_TYPE}-${MODEL_SIZE}/$DATASET/two_stage \ --plot_data_dir ./results/ \ --max_seq_length 128 \ --eval_each_highway \ --eval_highway \ --overwrite_cache \ --per_gpu_eval_batch_size=1
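The script above hard-codes a single `DATASET`; if one wanted to evaluate several GLUE tasks in a row, a small wrapper like the sketch below could be used, assuming the `DATASET=MRPC` line is first changed to `DATASET=${DATASET:-MRPC}` so the value can be supplied from the environment (that change is an assumption, not part of the original script).

```bash
# Hypothetical wrapper around eval_deebert.sh; assumes DATASET is read from the environment.
for task in MRPC RTE SST-2; do
    DATASET=$task bash eval_deebert.sh
done
```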
transformers/examples/research_projects/deebert/eval_deebert.sh/0
{ "file_path": "transformers/examples/research_projects/deebert/eval_deebert.sh", "repo_id": "transformers", "token_count": 360 }
67
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Preprocessing script before training DistilBERT. Specific to BERT -> DistilBERT. """ import argparse import torch from transformers import BertForMaskedLM if __name__ == "__main__": parser = argparse.ArgumentParser( description=( "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned" " Distillation" ) ) parser.add_argument("--model_type", default="bert", choices=["bert"]) parser.add_argument("--model_name", default="bert-base-uncased", type=str) parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str) parser.add_argument("--vocab_transform", action="store_true") args = parser.parse_args() if args.model_type == "bert": model = BertForMaskedLM.from_pretrained(args.model_name) prefix = "bert" else: raise ValueError('args.model_type should be "bert".') state_dict = model.state_dict() compressed_sd = {} for w in ["word_embeddings", "position_embeddings"]: compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"] for w in ["weight", "bias"]: compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"] std_idx = 0 for teacher_idx in [0, 2, 4, 7, 9, 11]: for w in ["weight", "bias"]: compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}" ] compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[ f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}" ] std_idx += 1 compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"] compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"] if args.vocab_transform: for w in ["weight", "bias"]: compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"] compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"] 
print(f"N layers selected for distillation: {std_idx}") print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}") print(f"Save transferred checkpoint to {args.dump_checkpoint}.") torch.save(compressed_sd, args.dump_checkpoint)
transformers/examples/research_projects/distillation/scripts/extract_distilbert.py/0
{ "file_path": "transformers/examples/research_projects/distillation/scripts/extract_distilbert.py", "repo_id": "transformers", "token_count": 1865 }
68
# Information Gain Filtration (IGF)

Authors @Tuko @mraunak

This folder contains the code showing how to implement IGF for fine-tuning GPT-2.

## What is IGF?

Here we present a general fine-tuning method that we call information gain filtration, aimed at improving the overall training efficiency and final performance of language model fine-tuning (see the paper below). It is an alternative fine-tuning approach that trains a secondary model (e.g., a simple convolutional network) to predict the amount of information gained over a given pre-trained model. The secondary model is lightweight and trained to predict the Information Gain measure. Information Gain is defined as the change in a loss function for a model before and after an SGD update with a sample (Equation X in the paper). A small subset of the training set, named the “objective” set, is used to measure information gain on the pre-trained model, and consequently to train the secondary model. After training, the secondary model is used to filter samples for the fine-tuning process. Therefore, a high information gain value suggests a sample is informative, whereas a low value suggests a non-informative sample that should be filtered out. Thus, a thresholding strategy is defined to select informative samples. With such a strategy, samples are filtered until enough of them have been selected to form a mini-batch, at which point a usual fine-tuning/optimization step is applied. The filtration process is repeated until the fine-tuning process is over.

Paper: [Selecting Informative Contexts Improves Language Model Finetuning](https://arxiv.org/abs/2005.00175)

# Results

Several experiments were conducted to show the robustness of the IGF method versus the standard fine-tuning process. For example, we achieve a median perplexity of 54.0 on the Books dataset compared to 57.3 for standard fine-tuning on GPT-2 Small. The code was implemented using the Transformers library and PyTorch. While the method may seem more expensive, we saw enough evidence that it may lead to a performance benefit in the final models.

![IGF performance](result_igf.png)

Figure 1: Comparing IGF to standard fine-tuning: IGF with constant (p < 10⁻³, t-test) and shifting (p < 10⁻⁶, t-test) thresholding significantly outperforms standard fine-tuning. The left-hand figure shows test-set perplexity after each fine-tuning batch, averaged over 50 runs (error bars denote ± one standard error). The right-hand figure shows the perplexity of each method after 60 batches. IGF with shifting thresholding (red) clearly improves over standard batched fine-tuning with Adam.

## How to use this project?

To fine-tune a transformer model with IGF on a language modeling task, use the following script; its arguments are:

- `model_name_or_path`: Path to a pretrained model or a model identifier from huggingface.co/models
- `data_file`: A jbl file containing tokenized data which can be split into an objective dataset, a train_dataset and a test_dataset
- `igf_data_file`: A jbl file containing the context and information gain pairs used to train the secondary learner
- `context_len`: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded.
- `size_objective_set`: Number of articles that are long enough to be used as our objective set
- `min_len`: The minimum length an article must have to be used in the objective set
- `trim`: Truncate the example if it exceeds the context length
- `eval_freq`: Secondary model evaluation is triggered every `eval_freq` batches
- `max_steps`: Used to calculate the number of training epochs
- `number`: The number of examples split off to be used as the objective_set/test_data
- `secondary_learner_batch_size`: The batch size of the training data for the secondary learner
- `secondary_learner_max_epochs`: The number of epochs to train the secondary learner
- `recopy_model`: Reset the model to the original pretrained GPT-2 weights after each iteration
- `eval_interval`: Decay the selectivity of our secondary learner filter from 1 standard deviation above average to 1 below average after every `eval_interval` (10) batches

```bash
python run_clm_igf.py \
    --model_name_or_path "openai-community/gpt2" \
    --data_file="data/tokenized_stories_train_wikitext103" \
    --igf_data_file="data/IGF_values" \
    --context_len 32 \
    --size_objective_set 100 \
    --min_len 1026 \
    --trim True \
    --eval_freq 100 \
    --max_steps 1000 \
    --secondary_learner_batch_size 128 \
    --secondary_learner_max_epochs 15 \
    --number 100 \
    --recopy_model \
    --eval_interval 10
```

## Citation

If you find this resource useful, please cite the following paper:

```bibtex
@inproceedings{antonello-etal-2021-selecting,
    title = "Selecting Informative Contexts Improves Language Model Fine-tuning",
    author = "Antonello, Richard and Beckage, Nicole and Turek, Javier and Huth, Alexander",
    booktitle = "Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers)",
    month = aug,
    year = "2021",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2021.acl-long.87",
    doi = "10.18653/v1/2021.acl-long.87",
    pages = "1072--1085",
}
```
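To make the filtering idea from the “What is IGF?” section more concrete, here is a minimal sketch of one IGF-filtered update step: a secondary learner scores candidate contexts, only those above a threshold are collected into a mini-batch, and a normal optimization step follows. The function and variable names are hypothetical and do not correspond to the actual `run_clm_igf.py` implementation; the sketch assumes a `transformers`-style causal LM that returns a loss when given `labels`.

```python
import torch


def igf_finetune_step(model, secondary_learner, candidate_batches, optimizer, threshold, batch_size=32):
    """One hypothetical IGF-filtered update: keep only contexts whose predicted
    information gain exceeds `threshold`, then apply a normal optimization step."""
    selected, n_kept = [], 0
    for contexts in candidate_batches:                    # contexts: LongTensor [n, context_len]
        with torch.no_grad():
            predicted_gain = secondary_learner(contexts)  # assumed IG predictor, one score per context
        keep = predicted_gain.view(-1) > threshold
        if keep.any():
            selected.append(contexts[keep])
            n_kept += int(keep.sum())
        if n_kept >= batch_size:                          # enough informative samples for a mini-batch
            break
    if n_kept == 0:
        return None                                       # nothing informative enough this round
    batch = torch.cat(selected, dim=0)[:batch_size]
    outputs = model(batch, labels=batch)                  # causal LM loss (GPT-2 style)
    outputs.loss.backward()
    optimizer.step()
    optimizer.zero_grad()
    return float(outputs.loss)
```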
transformers/examples/research_projects/information-gain-filtration/README.md/0
{ "file_path": "transformers/examples/research_projects/information-gain-filtration/README.md", "repo_id": "transformers", "token_count": 1461 }
69
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ import logging import os import sys import time from collections import defaultdict from dataclasses import dataclass, field # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. from pathlib import Path from typing import Dict, List, Optional, Tuple import datasets import flax import jax import jax.numpy as jnp import numpy as np import optax from datasets import load_dataset from flax import jax_utils, traverse_util from flax.training import train_state from flax.training.common_utils import get_metrics, onehot, shard from tqdm import tqdm from transformers import ( CONFIG_MAPPING, FLAX_MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoTokenizer, FlaxAutoModelForMaskedLM, HfArgumentParser, PreTrainedTokenizerBase, TensorType, TrainingArguments, is_tensorboard_available, set_seed, ) if datasets.__version__ <= "1.8.0": raise ValueError("Make sure to upgrade `datasets` to a version >= 1.9.0 to use dataset streaming") MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) dtype: Optional[str] = field( default="float32", metadata={ "help": ( "Floating-point format in which the model weights should be initialized and trained. Choose one of" " `[float32, float16, bfloat16]`." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) line_by_line: bool = field( default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."}, ) text_column_name: str = field( default="text", metadata={"help": "The name of the column to retrieve the training text."} ) shuffle_buffer_size: int = field( default=10000, metadata={"help": "The number of examples to pre-load for shuffling."} ) num_train_steps: int = field(default=50000, metadata={"help": "The number of training steps."}) num_eval_samples: int = field(default=50000, metadata={"help": "The number of samples to be used for evaluation"}) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." @flax.struct.dataclass class FlaxDataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. 
mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm_probability: float = 0.15 def __post_init__(self): if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__(self, examples: List[Dict[str, np.ndarray]]) -> Dict[str, np.ndarray]: # Handle dict or lists with proper padding and conversion to tensor. batch = self.tokenizer.pad(examples, return_tensors=TensorType.NUMPY) # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) return batch def mask_tokens( self, inputs: np.ndarray, special_tokens_mask: Optional[np.ndarray] ) -> Tuple[jnp.ndarray, jnp.ndarray]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.copy() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = np.full(labels.shape, self.mlm_probability) special_tokens_mask = special_tokens_mask.astype("bool") probability_matrix[special_tokens_mask] = 0.0 masked_indices = np.random.binomial(1, probability_matrix).astype("bool") labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = np.random.binomial(1, np.full(labels.shape, 0.8)).astype("bool") & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = np.random.binomial(1, np.full(labels.shape, 0.5)).astype("bool") indices_random &= masked_indices & ~indices_replaced random_words = np.random.randint(self.tokenizer.vocab_size, size=labels.shape, dtype="i4") inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels def generate_batch_splits(samples_idx: np.ndarray, batch_size: int) -> np.ndarray: num_samples = len(samples_idx) samples_to_remove = num_samples % batch_size if samples_to_remove != 0: samples_idx = samples_idx[:-samples_to_remove] sections_split = num_samples // batch_size batch_idx = np.split(samples_idx, sections_split) return batch_idx def advance_iter_and_group_samples(train_iterator, num_samples, max_seq_length): """ The training iterator is advanced so that after groupifying the samples, `num_samples` of length `max_seq_length` are returned. 
""" num_total_tokens = max_seq_length * num_samples samples = defaultdict(list) i = 0 while i < num_total_tokens: tokenized_samples = next(train_iterator) i += len(tokenized_samples["input_ids"]) # concatenate tokenized samples to list (excluding "id" and "text") samples = { k: samples[k] + tokenized_samples[k] for k in ["input_ids", "attention_mask", "special_tokens_mask"] } # Concatenated tokens are split to lists of length `max_seq_length`. # Note that remainedr of % max_seq_length are thrown away. def group_texts(examples): result = { k: [t[i : i + max_seq_length] for i in range(0, num_total_tokens, max_seq_length)] for k, t in examples.items() } return result grouped_samples = group_texts(samples) return grouped_samples def write_train_metric(summary_writer, train_metrics, train_time, step): summary_writer.scalar("train_time", train_time, step) train_metrics = get_metrics(train_metrics) for key, vals in train_metrics.items(): tag = f"train_{key}" for i, val in enumerate(vals): summary_writer.scalar(tag, val, step - len(vals) + i + 1) def write_eval_metric(summary_writer, eval_metrics, step): for metric_name, value in eval_metrics.items(): summary_writer.scalar(f"eval_{metric_name}", value, step) if __name__ == "__main__": # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level="INFO", datefmt="[%X]", ) # Log on each process the small summary: logger = logging.getLogger(__name__) logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. 
dataset = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, streaming=True, split="train", ) if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer ) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) # Otherwise, we tokenize every text, then concatenate them together before splitting them in smaller parts. # We use `return_special_tokens_mask=True` because DataCollatorForLanguageModeling (see below) is more # efficient when it receives the `special_tokens_mask`. def tokenize_function(examples): return tokenizer(examples[data_args.text_column_name], return_special_tokens_mask=True) tokenized_datasets = dataset.map(tokenize_function, batched=True, remove_columns=list(dataset.features.keys())) shuffle_seed = training_args.seed tokenized_datasets = tokenized_datasets.shuffle(buffer_size=data_args.shuffle_buffer_size, seed=shuffle_seed) has_tensorboard = is_tensorboard_available() if has_tensorboard and jax.process_index() == 0: try: from flax.metrics.tensorboard import SummaryWriter except ImportError as ie: has_tensorboard = False logger.warning( f"Unable to display metrics through TensorBoard because some package are not installed: {ie}" ) summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir)) # Data collator # This one will take care of randomly masking the tokens. 
data_collator = FlaxDataCollatorForLanguageModeling(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our training rng = jax.random.PRNGKey(training_args.seed) dropout_rngs = jax.random.split(rng, jax.local_device_count()) if model_args.model_name_or_path: model = FlaxAutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, config=config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) else: model = FlaxAutoModelForMaskedLM.from_config( config, seed=training_args.seed, dtype=getattr(jnp, model_args.dtype) ) # Store some constant num_epochs = int(training_args.num_train_epochs) train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count() eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count() # define number steps per stream epoch num_train_steps = data_args.num_train_steps # Create learning rate schedule warmup_fn = optax.linear_schedule( init_value=0.0, end_value=training_args.learning_rate, transition_steps=training_args.warmup_steps ) decay_fn = optax.linear_schedule( init_value=training_args.learning_rate, end_value=0, transition_steps=num_train_steps - training_args.warmup_steps, ) linear_decay_lr_schedule_fn = optax.join_schedules( schedules=[warmup_fn, decay_fn], boundaries=[training_args.warmup_steps] ) # We use Optax's "masking" functionality to not apply weight decay # to bias and LayerNorm scale parameters. decay_mask_fn returns a # mask boolean with the same structure as the parameters. # The mask is True for parameters that should be decayed. # Note that this mask is specifically adapted for FlaxBERT-like models. # For other models, one should correct the layer norm parameter naming # accordingly. def decay_mask_fn(params): flat_params = traverse_util.flatten_dict(params) flat_mask = {path: (path[-1] != "bias" and path[-2:] != ("LayerNorm", "scale")) for path in flat_params} return traverse_util.unflatten_dict(flat_mask) # create adam optimizer adamw = optax.adamw( learning_rate=linear_decay_lr_schedule_fn, b1=training_args.adam_beta1, b2=training_args.adam_beta2, eps=training_args.adam_epsilon, weight_decay=training_args.weight_decay, mask=decay_mask_fn, ) # Setup train state state = train_state.TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw) # Define gradient update step fn def train_step(state, batch, dropout_rng): dropout_rng, new_dropout_rng = jax.random.split(dropout_rng) def loss_fn(params): labels = batch.pop("labels") logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) * label_mask # take average loss = loss.sum() / label_mask.sum() return loss grad_fn = jax.value_and_grad(loss_fn) loss, grad = grad_fn(state.params) grad = jax.lax.pmean(grad, "batch") new_state = state.apply_gradients(grads=grad) metrics = jax.lax.pmean( {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}, axis_name="batch" ) return new_state, metrics, new_dropout_rng # Create parallel version of the train step p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,)) # Define eval fn def eval_step(params, batch): labels = batch.pop("labels") logits = model(**batch, params=params, train=False)[0] # compute loss, ignore padded input tokens label_mask = jnp.where(labels > 0, 1.0, 0.0) loss = optax.softmax_cross_entropy(logits, onehot(labels, 
logits.shape[-1])) * label_mask # compute accuracy accuracy = jnp.equal(jnp.argmax(logits, axis=-1), labels) * label_mask # summarize metrics metrics = {"loss": loss.sum(), "accuracy": accuracy.sum(), "normalizer": label_mask.sum()} metrics = jax.lax.psum(metrics, axis_name="batch") return metrics p_eval_step = jax.pmap(eval_step, "batch", donate_argnums=(0,)) # Replicate the train state on each device state = jax_utils.replicate(state) train_time = 0 train_start = time.time() train_metrics = [] eval_metrics = [] training_iter = iter(tokenized_datasets) max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length) eval_samples = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) steps = tqdm(range(num_train_steps), desc="Training...", position=0) for step in range(num_train_steps): # ======================== Training ================================ try: samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) except StopIteration: # Once the end of the dataset stream is reached, the training iterator # is reinitialized and reshuffled and a new eval dataset is randomly chosen. shuffle_seed += 1 tokenized_datasets.set_epoch(shuffle_seed) training_iter = iter(tokenized_datasets) eval_dataset = advance_iter_and_group_samples(training_iter, data_args.num_eval_samples, max_seq_length) samples = advance_iter_and_group_samples(training_iter, train_batch_size, max_seq_length) # process input samples model_inputs = data_collator(samples) # Model forward model_inputs = shard(model_inputs.data) state, train_metric, dropout_rngs = p_train_step(state, model_inputs, dropout_rngs) train_metrics.append(train_metric) if step % training_args.logging_steps == 0 and step > 0: steps.write( f"Step... ({step} | Loss: {train_metric['loss'].mean()}, Learning Rate:" f" {train_metric['learning_rate'].mean()})" ) train_time += time.time() - train_start if has_tensorboard and jax.process_index() == 0: write_train_metric(summary_writer, train_metrics, train_time, step) train_metrics = [] # ======================== Evaluating ============================== if step % training_args.eval_steps == 0 and step > 0: # Avoid using jax.numpy here in case of TPU training eval_samples_idx = np.arange(data_args.num_eval_samples) eval_batch_idx = generate_batch_splits(eval_samples_idx, eval_batch_size) for i, batch_idx in enumerate(tqdm(eval_batch_idx, desc="Evaluating ...", position=1)): # process input samples batch_eval_samples = {k: [v[idx] for idx in batch_idx] for k, v in eval_samples.items()} model_inputs = data_collator(batch_eval_samples) # Model forward model_inputs = shard(model_inputs.data) metrics = p_eval_step(state.params, model_inputs) eval_metrics.append(metrics) # normalize eval metrics eval_metrics = get_metrics(eval_metrics) eval_metrics = jax.tree_util.tree_map(jnp.sum, eval_metrics) eval_normalizer = eval_metrics.pop("normalizer") eval_metrics = jax.tree_util.tree_map(lambda x: x / eval_normalizer, eval_metrics) # Update progress bar steps.desc = ( f"Step... 
({step + 1}/{num_train_steps} | Loss: {eval_metrics['loss']}, Acc:" f" {eval_metrics['accuracy']})" ) if has_tensorboard and jax.process_index() == 0: write_eval_metric(summary_writer, eval_metrics, step) eval_metrics = [] # save checkpoint after each epoch and push checkpoint to the hub if jax.process_index() == 0: params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params)) model.save_pretrained( training_args.output_dir, params=params, push_to_hub=training_args.push_to_hub, commit_message=f"Saving weights and logs of step {step+1}", ) # update tqdm bar steps.update(1)
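The script above is configured through `HfArgumentParser` with `ModelArguments`, `DataTrainingArguments` and the standard `TrainingArguments`, so a launch command might look like the sketch below. The model and dataset names and all numeric values are illustrative only; flags not defined in the dataclasses above come from `TrainingArguments` and should be double-checked there.

```bash
# Illustrative launch; every flag maps to a field of the argument dataclasses.
python run_mlm_flax_stream.py \
    --output_dir ./mlm-streaming \
    --model_name_or_path roberta-base \
    --dataset_name oscar \
    --dataset_config_name unshuffled_deduplicated_en \
    --max_seq_length 128 \
    --per_device_train_batch_size 32 \
    --num_train_steps 10000 \
    --num_eval_samples 5000 \
    --learning_rate 3e-4 \
    --warmup_steps 1000 \
    --logging_steps 50 \
    --eval_steps 1000
```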
transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py/0
{ "file_path": "transformers/examples/research_projects/jax-projects/dataset-streaming/run_mlm_flax_stream.py", "repo_id": "transformers", "token_count": 10618 }
70
import functools import math import os # noqa: F401 from random import choice, randint from time import time import datasets # noqa: F401 import faiss # noqa: F401 import numpy as np import pandas as pd import torch import torch.utils.checkpoint as checkpoint from elasticsearch import Elasticsearch # noqa: F401 from elasticsearch.helpers import bulk, streaming_bulk # noqa: F401 from torch import nn from torch.utils.data import DataLoader, Dataset, RandomSampler, SequentialSampler from tqdm import tqdm from transformers import AdamW, AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer, get_linear_schedule_with_warmup pd.set_option("display.max_colwidth", None) ############### # Sparse index ############### def make_es_index_snippets(es_client, passages_dset, index_name="english_wiki_kilt_snippets_100w"): index_config = { "settings": { "number_of_shards": 1, "analysis": {"analyzer": {"stop_standard": {"type": "standard", " stopwords": "_english_"}}}, }, "mappings": { "properties": { "article_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "section_title": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, "passage_text": {"type": "text", "analyzer": "standard", "similarity": "BM25"}, } }, } es_client.indices.create(index=index_name, body=index_config) number_of_docs = passages_dset.num_rows progress = tqdm(unit="docs", total=number_of_docs) successes = 0 def passage_generator(): for passage in passages_dset: yield passage # create the ES index for ok, action in streaming_bulk( client=es_client, index=index_name, actions=passage_generator(), ): progress.update(1) successes += ok print("Indexed %d documents" % (successes,)) def query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w", n_results=10, min_length=20): q = question.lower() banned = ["how", "why", "what", "where", "which", "do", "does", "is", "?", "eli5", "eli5:"] q = " ".join([w for w in q.split() if w not in banned]) response = es_client.search( index=index_name, body={ "query": { "multi_match": { "query": q, "fields": ["article_title", "section_title", "passage_text^2"], "type": "cross_fields", } }, "size": 2 * n_results, }, ) hits = response["hits"]["hits"] support_doc = "<P> " + " <P> ".join([hit["_source"]["passage_text"] for hit in hits]) res_list = [{k: hit["_source"][k] for k in hit["_source"] if k != "passage_text"} for hit in hits] for r, hit in zip(res_list, hits): r["passage_id"] = hit["_id"] r["score"] = hit["_score"] r["passage_text"] = hit["_source"]["passage_text"] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] return support_doc, res_list ############### # ELI5 retriever training ############### class ELI5DatasetQARetriver(Dataset): def __init__(self, examples_array, extra_answer_threshold=3, min_answer_length=64, training=True, n_samples=None): self.data = examples_array self.answer_thres = extra_answer_threshold self.min_length = min_answer_length self.training = training self.n_samples = self.data.num_rows if n_samples is None else n_samples def __len__(self): return self.n_samples def make_example(self, idx): example = self.data[idx] question = example["title"] if self.training: answers = [a for i, (a, sc) in enumerate(zip(example["answers"]["text"], example["answers"]["score"]))] answer_tab = choice(answers).split(" ") start_idx = randint(0, max(0, len(answer_tab) - self.min_length)) answer_span = " ".join(answer_tab[start_idx:]) else: answer_span = example["answers"]["text"][0] return (question, 
answer_span) def __getitem__(self, idx): return self.make_example(idx % self.data.num_rows) class RetrievalQAEmbedder(nn.Module): def __init__(self, sent_encoder, dim): super(RetrievalQAEmbedder, self).__init__() self.sent_encoder = sent_encoder self.output_dim = 128 self.project_q = nn.Linear(dim, self.output_dim, bias=False) self.project_a = nn.Linear(dim, self.output_dim, bias=False) self.ce_loss = nn.CrossEntropyLoss(reduction="mean") def embed_sentences_checkpointed(self, input_ids, attention_mask, checkpoint_batch_size=-1): # reproduces BERT forward pass with checkpointing if checkpoint_batch_size < 0 or input_ids.shape[0] < checkpoint_batch_size: return self.sent_encoder(input_ids, attention_mask=attention_mask)[1] else: # prepare implicit variables device = input_ids.device input_shape = input_ids.size() token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) head_mask = [None] * self.sent_encoder.config.num_hidden_layers extended_attention_mask: torch.Tensor = self.sent_encoder.get_extended_attention_mask( attention_mask, input_shape ) # define function for checkpointing def partial_encode(*inputs): encoder_outputs = self.sent_encoder.encoder( inputs[0], attention_mask=inputs[1], head_mask=head_mask, ) sequence_output = encoder_outputs[0] pooled_output = self.sent_encoder.pooler(sequence_output) return pooled_output # run embedding layer on everything at once embedding_output = self.sent_encoder.embeddings( input_ids=input_ids, position_ids=None, token_type_ids=token_type_ids, inputs_embeds=None ) # run encoding and pooling on one mini-batch at a time pooled_output_list = [] for b in range(math.ceil(input_ids.shape[0] / checkpoint_batch_size)): b_embedding_output = embedding_output[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] b_attention_mask = extended_attention_mask[b * checkpoint_batch_size : (b + 1) * checkpoint_batch_size] pooled_output = checkpoint.checkpoint(partial_encode, b_embedding_output, b_attention_mask) pooled_output_list.append(pooled_output) return torch.cat(pooled_output_list, dim=0) def embed_questions(self, q_ids, q_mask, checkpoint_batch_size=-1): q_reps = self.embed_sentences_checkpointed(q_ids, q_mask, checkpoint_batch_size) return self.project_q(q_reps) def embed_answers(self, a_ids, a_mask, checkpoint_batch_size=-1): a_reps = self.embed_sentences_checkpointed(a_ids, a_mask, checkpoint_batch_size) return self.project_a(a_reps) def forward(self, q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=-1): device = q_ids.device q_reps = self.embed_questions(q_ids, q_mask, checkpoint_batch_size) a_reps = self.embed_answers(a_ids, a_mask, checkpoint_batch_size) compare_scores = torch.mm(q_reps, a_reps.t()) loss_qa = self.ce_loss(compare_scores, torch.arange(compare_scores.shape[1]).to(device)) loss_aq = self.ce_loss(compare_scores.t(), torch.arange(compare_scores.shape[0]).to(device)) loss = (loss_qa + loss_aq) / 2 return loss def make_qa_retriever_model(model_name="google/bert_uncased_L-8_H-512_A-8", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) bert_model = AutoModel.from_pretrained(model_name).to(device) # run bert_model on a dummy batch to get output dimension d_ids = torch.LongTensor( [[bert_model.config.bos_token_id if bert_model.config.bos_token_id is not None else 1]] ).to(device) d_mask = torch.LongTensor([[1]]).to(device) sent_dim = bert_model(d_ids, attention_mask=d_mask)[1].shape[-1] qa_embedder = RetrievalQAEmbedder(bert_model, sent_dim).to(device) if from_file is not None: 
param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states qa_embedder.load_state_dict(param_dict["model"]) return tokenizer, qa_embedder def make_qa_retriever_batch(qa_list, tokenizer, max_len=64, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=max_len, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) return (q_ids, q_mask, a_ids, a_mask) def train_qa_retriever_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0): model.train() # make iterator train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch pre_loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) loss = pre_loss.sum() # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def train_qa_retriever_joint_epoch(model, dataset_list, tokenizer, optimizer, scheduler, args, e=0): model.train() model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) # make iterator train_samplers = [RandomSampler(dataset) for dataset in dataset_list] data_loaders = [ DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) for dataset, train_sampler in zip(dataset_list, train_samplers) ] iterators = [iter(dloader) for dloader in data_loaders] joint_iter = zip(*iterators) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, (batches,) in enumerate(zip(joint_iter)): for batch in batches: q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask, checkpoint_batch_size=args.checkpoint_batch_size) # optimizer loss.backward() optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset_list[0]) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def evaluate_qa_retriever(model, dataset, tokenizer, args): model.eval() # make iterator eval_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_retriever_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=eval_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, 
desc="Iteration", disable=True) tot_loss = 0.0 with torch.no_grad(): for step, batch in enumerate(epoch_iterator): q_ids, q_mask, a_ids, a_mask = batch loss = model(q_ids, q_mask, a_ids, a_mask) tot_loss += loss.item() return tot_loss / (step + 1) def train_qa_retriever(qar_model, qar_tokenizer, qar_train_dset, qar_valid_dset, qar_args): qar_optimizer = AdamW(qar_model.parameters(), lr=qar_args.learning_rate, eps=1e-8) qar_scheduler = get_linear_schedule_with_warmup( qar_optimizer, num_warmup_steps=100, num_training_steps=(qar_args.num_epochs + 1) * math.ceil(len(qar_train_dset) / qar_args.batch_size), ) for e in range(qar_args.num_epochs): train_qa_retriever_epoch(qar_model, qar_train_dset, qar_tokenizer, qar_optimizer, qar_scheduler, qar_args, e) m_save_dict = { "model": qar_model.state_dict(), "optimizer": qar_optimizer.state_dict(), "scheduler": qar_scheduler.state_dict(), } print("Saving model {}".format(qar_args.model_save_name)) torch.save(m_save_dict, "{}_{}.pth".format(qar_args.model_save_name, e)) eval_loss = evaluate_qa_retriever(qar_model, qar_valid_dset, qar_tokenizer, qar_args) print("Evaluation loss epoch {:4d}: {:.3f}".format(e, eval_loss)) ############### # ELI5 seq2seq model training ############### class ELI5DatasetS2S(Dataset): def __init__( self, examples_array, make_doc_fun=None, extra_answer_threshold=3, document_cache=None, training=True ): self.training = training self.data = examples_array self.make_doc_function = make_doc_fun self.document_cache = {} if document_cache is None else document_cache assert not (make_doc_fun is None and document_cache is None) # make index of specific question-answer pairs from multi-answers if self.training: self.qa_id_list = [ (i, j) for i, qa in enumerate(self.data) for j, (a, sc) in enumerate(zip(qa["answers"]["text"], qa["answers"]["score"])) if j == 0 or sc >= extra_answer_threshold ] else: self.qa_id_list = [(i, 0) for i in range(self.data.num_rows)] def __len__(self): return len(self.qa_id_list) def make_example(self, idx): i, j = self.qa_id_list[idx] example = self.data[i] question = example["title"] + " " + example["selftext"] answer = example["answers"]["text"][j] q_id = example["q_id"] if self.make_doc_function is not None: self.document_cache[q_id] = self.document_cache.get(q_id, self.make_doc_function(example["title"])) document = self.document_cache[q_id] in_st = "question: {} context: {}".format( question.lower().replace(" --t--", "").strip(), document.lower().strip(), ) out_st = answer return (in_st, out_st) def __getitem__(self, idx): return self.make_example(idx) def make_qa_s2s_model(model_name="facebook/bart-large", from_file=None, device="cuda:0"): tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device) if from_file is not None: param_dict = torch.load(from_file) # has model weights, optimizer, and scheduler states model.load_state_dict(param_dict["model"]) return tokenizer, model def make_qa_s2s_batch(qa_list, tokenizer, max_len=64, max_a_len=360, device="cuda:0"): q_ls = [q for q, a in qa_list] a_ls = [a for q, a in qa_list] q_toks = tokenizer(q_ls, max_length=max_len, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) a_toks = tokenizer(a_ls, max_length=min(max_len, max_a_len), padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), 
torch.LongTensor(a_toks["attention_mask"]).to(device), ) lm_labels = a_ids[:, 1:].contiguous().clone() lm_labels[a_mask[:, 1:].contiguous() == 0] = -100 model_inputs = { "input_ids": q_ids, "attention_mask": q_mask, "decoder_input_ids": a_ids[:, :-1].contiguous(), "lm_labels": lm_labels, } return model_inputs def train_qa_s2s_epoch(model, dataset, tokenizer, optimizer, scheduler, args, e=0, curriculum=False): model.train() # make iterator if curriculum: train_sampler = SequentialSampler(dataset) else: train_sampler = RandomSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loss.backward() # optimizer if step % args.backward_freq == 0: optimizer.step() scheduler.step() model.zero_grad() # some printing within the epoch loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0 or step == 1: print( "{:2d} {:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( e, step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) loc_loss = 0 loc_steps = 0 def eval_qa_s2s_epoch(model, dataset, tokenizer, args): model.eval() # make iterator train_sampler = SequentialSampler(dataset) model_collate_fn = functools.partial( make_qa_s2s_batch, tokenizer=tokenizer, max_len=args.max_length, device="cuda:0" ) data_loader = DataLoader(dataset, batch_size=args.batch_size, sampler=train_sampler, collate_fn=model_collate_fn) epoch_iterator = tqdm(data_loader, desc="Iteration", disable=True) # accumulate loss since last print loc_steps = 0 loc_loss = 0.0 st_time = time() with torch.no_grad(): for step, batch_inputs in enumerate(epoch_iterator): pre_loss = model(**batch_inputs)[0] loss = pre_loss.sum() / pre_loss.shape[0] loc_loss += loss.item() loc_steps += 1 if step % args.print_freq == 0: print( "{:5d} of {:5d} \t L: {:.3f} \t -- {:.3f}".format( step, len(dataset) // args.batch_size, loc_loss / loc_steps, time() - st_time, ) ) print( "Total \t L: {:.3f} \t -- {:.3f}".format( loc_loss / loc_steps, time() - st_time, ) ) def train_qa_s2s(qa_s2s_model, qa_s2s_tokenizer, s2s_train_dset, s2s_valid_dset, s2s_args): s2s_optimizer = AdamW(qa_s2s_model.parameters(), lr=s2s_args.learning_rate, eps=1e-8) s2s_scheduler = get_linear_schedule_with_warmup( s2s_optimizer, num_warmup_steps=400, num_training_steps=(s2s_args.num_epochs + 1) * math.ceil(len(s2s_train_dset) / s2s_args.batch_size), ) for e in range(s2s_args.num_epochs): train_qa_s2s_epoch( qa_s2s_model, s2s_train_dset, qa_s2s_tokenizer, s2s_optimizer, s2s_scheduler, s2s_args, e, curriculum=(e == 0), ) m_save_dict = { "model": qa_s2s_model.state_dict(), "optimizer": s2s_optimizer.state_dict(), "scheduler": s2s_scheduler.state_dict(), } print("Saving model {}".format(s2s_args.model_save_name)) eval_qa_s2s_epoch(qa_s2s_model, s2s_valid_dset, qa_s2s_tokenizer, s2s_args) torch.save(m_save_dict, "{}_{}.pth".format(s2s_args.model_save_name, e)) # generate answer from input "question: ... context: <p> ..." 
def qa_s2s_generate( question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=None, min_len=64, max_len=256, do_sample=False, temp=1.0, top_p=None, top_k=None, max_input_length=512, device="cuda:0", ): model_inputs = make_qa_s2s_batch( [(question_doc, "A")], qa_s2s_tokenizer, max_input_length, device=device, ) n_beams = num_answers if num_beams is None else max(num_beams, num_answers) generated_ids = qa_s2s_model.generate( input_ids=model_inputs["input_ids"], attention_mask=model_inputs["attention_mask"], min_length=min_len, max_length=max_len, do_sample=do_sample, early_stopping=True, num_beams=1 if do_sample else n_beams, temperature=temp, top_k=top_k, top_p=top_p, eos_token_id=qa_s2s_tokenizer.eos_token_id, no_repeat_ngram_size=3, num_return_sequences=num_answers, decoder_start_token_id=qa_s2s_tokenizer.bos_token_id, ) return [qa_s2s_tokenizer.decode(ans_ids, skip_special_tokens=True).strip() for ans_ids in generated_ids] ############### # ELI5-trained retrieval model usage ############### def embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length=128, device="cuda:0"): a_toks = tokenizer(passages, max_length=max_length, padding="max_length", truncation=True) a_ids, a_mask = ( torch.LongTensor(a_toks["input_ids"]).to(device), torch.LongTensor(a_toks["attention_mask"]).to(device), ) with torch.no_grad(): a_reps = qa_embedder.embed_answers(a_ids, a_mask).cpu().type(torch.float) return a_reps.numpy() def embed_questions_for_retrieval(q_ls, tokenizer, qa_embedder, device="cuda:0"): q_toks = tokenizer(q_ls, max_length=128, padding="max_length", truncation=True) q_ids, q_mask = ( torch.LongTensor(q_toks["input_ids"]).to(device), torch.LongTensor(q_toks["attention_mask"]).to(device), ) with torch.no_grad(): q_reps = qa_embedder.embed_questions(q_ids, q_mask).cpu().type(torch.float) return q_reps.numpy() def make_qa_dense_index( qa_embedder, tokenizer, passages_dset, batch_size=512, max_length=128, index_name="kilt_passages_reps.dat", dtype="float32", device="cuda:0", ): st_time = time() fp = np.memmap(index_name, dtype=dtype, mode="w+", shape=(passages_dset.num_rows, 128)) n_batches = math.ceil(passages_dset.num_rows / batch_size) for i in range(n_batches): passages = list(passages_dset[i * batch_size : (i + 1) * batch_size]["passage_text"]) reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder, max_length, device) fp[i * batch_size : (i + 1) * batch_size] = reps if i % 50 == 0: print(i, time() - st_time) def evaluate_retriever(qa_list, retriever_func, scoring_func, n_ret=10, verbose=False): total_retriever_time = 0.0 total_retriever_score = 0.0 st_time = time() for i, (question, answer) in enumerate(qa_list): r_time = time() retrieved_passages = retriever_func(question, n_ret) total_retriever_time += time() - r_time total_retriever_score += scoring_func(retrieved_passages, answer) if verbose and ((i + 1) % 500 == 0 or i <= 1): print( "{:03d}: S-{:.4f} T-{:.4f} | {:.2f}".format( i + 1, total_retriever_score / (i + 1), total_retriever_time / (i + 1), time() - st_time ) ) return {"idf_recall": total_retriever_score / (i + 1), "retrieval_time": total_retriever_time / (i + 1)} # build a support document for the question out of Wikipedia snippets def query_qa_dense_index( question, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20, device="cuda:0" ): q_rep = embed_questions_for_retrieval([question], tokenizer, qa_embedder, device=device) D, I = wiki_index.search(q_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] 
for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc in zip(res_list, D[0]): r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index(questions, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): q_rep = embed_questions_for_retrieval(questions, tokenizer, qa_embedder) D, I = wiki_index.search(q_rep, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for res_passages, dl in zip(res_passages_lst, D): res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] for r, sc in zip(res_list, dl): r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists # find nearest neighbors of an answer or declarative text in Wikipedia snippets def query_qa_dense_index_nn(passage, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10, min_length=20): a_rep = embed_passages_for_retrieval([passage], tokenizer, qa_embedder) D, I = wiki_index.search(a_rep, 2 * n_results) res_passages = [wiki_passages[int(i)] for i in I[0]] support_doc = "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] res_list = [res for res in res_list if len(res["passage_text"].split()) > min_length][:n_results] for r, sc, i in zip(res_list, D[0], I[0]): r["passage_id"] = int(i) r["score"] = float(sc) return support_doc, res_list def batch_query_qa_dense_index_nn(passages, qa_embedder, tokenizer, wiki_passages, wiki_index, n_results=10): a_reps = embed_passages_for_retrieval(passages, tokenizer, qa_embedder) D, I = wiki_index.search(a_reps, n_results) res_passages_lst = [[wiki_passages[int(i)] for i in i_lst] for i_lst in I] support_doc_lst = [ "<P> " + " <P> ".join([p["passage_text"] for p in res_passages]) for res_passages in res_passages_lst ] all_res_lists = [] for res_passages, dl, il in zip(res_passages_lst, D, I): res_list = [{k: p[k] for k in wiki_passages.column_names} for p in res_passages] for r, sc, i in zip(res_list, dl, il): r["passage_id"] = int(i) r["score"] = float(sc) all_res_lists += [res_list[:]] return support_doc_lst, all_res_lists
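Tying the helpers in this module together, a typical end-to-end call retrieves a support document and then generates an answer with the seq2seq model. The snippet below is a sketch using only functions defined above; the Elasticsearch host, index name and model checkpoint are placeholders, and it assumes a CUDA device (the module's default) plus an elasticsearch-py client version that accepts this constructor form.

```python
# Sketch: sparse retrieval with query_es_index, then answer generation with qa_s2s_generate.
from elasticsearch import Elasticsearch

es_client = Elasticsearch([{"host": "localhost", "port": 9200}])  # placeholder host/port

# BART-based answer generator (loads onto cuda:0 by default).
qa_s2s_tokenizer, qa_s2s_model = make_qa_s2s_model(model_name="facebook/bart-large")

question = "Why does water expand when it freezes?"
support_doc, hits = query_es_index(question, es_client, index_name="english_wiki_kilt_snippets_100w")

# Same "question: ... context: ..." format used by ELI5DatasetS2S above.
question_doc = "question: {} context: {}".format(question, support_doc)
answers = qa_s2s_generate(question_doc, qa_s2s_model, qa_s2s_tokenizer, num_answers=1, num_beams=8)
print(answers[0])
```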
transformers/examples/research_projects/longform-qa/eli5_utils.py/0
{ "file_path": "transformers/examples/research_projects/longform-qa/eli5_utils.py", "repo_id": "transformers", "token_count": 13301 }
71
# coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for masked language modeling (BERT, ALBERT, RoBERTa...) with whole word masking on a text file or a dataset. Here is the full list of checkpoints on the hub that can be fine-tuned by this script: https://huggingface.co/models?filter=fill-mask """ # You can also adapt this script on your own masked language modeling task. Pointers for this are left as comments. import json import logging import math import os import sys from dataclasses import dataclass, field from typing import Optional from datasets import Dataset, load_dataset import transformers from transformers import ( CONFIG_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, DataCollatorForWholeWordMask, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. """ model_name_or_path: Optional[str] = field( default=None, metadata={ "help": ( "The model checkpoint for weights initialization. Don't set if you want to train a model from scratch." ) }, ) model_type: Optional[str] = field( default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)}, ) config_overrides: Optional[str] = field( default=None, metadata={ "help": ( "Override some existing default config settings when a model is trained from scratch. Example: " "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index" ) }, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." 
) }, ) def __post_init__(self): if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None): raise ValueError( "--config_overrides can't be used in combination with --config_name or --model_name_or_path" ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."}) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."}, ) train_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input train ref data file for whole word masking in Chinese."}, ) validation_ref_file: Optional[str] = field( default=None, metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) validation_split_percentage: Optional[int] = field( default=5, metadata={ "help": "The percentage of the train set used as validation set in case there's no validation split" }, ) max_seq_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated. Default to the max input length of the model." ) }, ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) mlm_probability: float = field( default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to `max_seq_length`. " "If False, will pad the samples dynamically when batching to the maximum length in the batch." ) }, ) def __post_init__(self): if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file." def add_chinese_references(dataset, ref_file): with open(ref_file, "r", encoding="utf-8") as f: refs = [json.loads(line) for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())] assert len(dataset) == len(refs) dataset_dict = {c: dataset[c] for c in dataset.column_names} dataset_dict["chinese_ref"] = refs return Dataset.from_dict(dataset_dict) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info("Training/evaluation parameters %s", training_args) # Set seed before initializing model. set_seed(training_args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name) if "validation" not in datasets.keys(): datasets["validation"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[:{data_args.validation_split_percentage}%]", ) datasets["train"] = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=f"train[{data_args.validation_split_percentage}%:]", ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if extension == "txt": extension = "text" datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config_kwargs = { "cache_dir": model_args.cache_dir, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.config_name: config = AutoConfig.from_pretrained(model_args.config_name, **config_kwargs) elif model_args.model_name_or_path: config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs) else: config = CONFIG_MAPPING[model_args.model_type]() logger.warning("You are instantiating a new config instance from scratch.") if model_args.config_overrides is not None: logger.info(f"Overriding config: {model_args.config_overrides}") config.update_from_string(model_args.config_overrides) logger.info(f"New config: {config}") tokenizer_kwargs = { "cache_dir": model_args.cache_dir, "use_fast": model_args.use_fast_tokenizer, "revision": model_args.model_revision, "use_auth_token": True if model_args.use_auth_token else None, } if model_args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, **tokenizer_kwargs) elif model_args.model_name_or_path: tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, **tokenizer_kwargs) else: raise ValueError( "You are instantiating a new tokenizer from scratch. This is not supported by this script. " "You can do it from another script, save it, and load it from here, using --tokenizer_name." ) if model_args.model_name_or_path: model = AutoModelForMaskedLM.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=True if model_args.use_auth_token else None, ) else: logger.info("Training new model from scratch") model = AutoModelForMaskedLM.from_config(config) model.resize_token_embeddings(len(tokenizer)) # Preprocessing the datasets. # First we tokenize all the texts. if training_args.do_train: column_names = datasets["train"].column_names else: column_names = datasets["validation"].column_names text_column_name = "text" if "text" in column_names else column_names[0] padding = "max_length" if data_args.pad_to_max_length else False def tokenize_function(examples): # Remove empty lines examples["text"] = [line for line in examples["text"] if len(line) > 0 and not line.isspace()] return tokenizer(examples["text"], padding=padding, truncation=True, max_length=data_args.max_seq_length) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=[text_column_name], load_from_cache_file=not data_args.overwrite_cache, ) # Add the chinese references if provided if data_args.train_ref_file is not None: tokenized_datasets["train"] = add_chinese_references(tokenized_datasets["train"], data_args.train_ref_file) if data_args.validation_ref_file is not None: tokenized_datasets["validation"] = add_chinese_references( tokenized_datasets["validation"], data_args.validation_ref_file ) # If we have ref files, need to avoid it removed by trainer has_ref = data_args.train_ref_file or data_args.validation_ref_file if has_ref: training_args.remove_unused_columns = False # Data collator # This one will take care of randomly masking the tokens. 
data_collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=data_args.mlm_probability) # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=tokenized_datasets["train"] if training_args.do_train else None, eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() # Saves the tokenizer too for easy upload output_train_file = os.path.join(training_args.output_dir, "train_results.txt") if trainer.is_world_process_zero(): with open(output_train_file, "w") as writer: logger.info("***** Train results *****") for key, value in sorted(train_result.metrics.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json")) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") eval_output = trainer.evaluate() perplexity = math.exp(eval_output["eval_loss"]) results["perplexity"] = perplexity output_eval_file = os.path.join(training_args.output_dir, "eval_results_mlm_wwm.txt") if trainer.is_world_process_zero(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in sorted(results.items()): logger.info(f" {key} = {value}") writer.write(f"{key} = {value}\n") return results def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
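# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original script): a
# minimal, standalone look at what `DataCollatorForWholeWordMask` produces for
# tokenized examples like the ones built above. The checkpoint name and the
# sentence are placeholders.
# ---------------------------------------------------------------------------
def _whole_word_mask_demo():
    from transformers import AutoTokenizer, DataCollatorForWholeWordMask

    tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
    features = [tokenizer("Whole word masking masks every sub-token of a word together.")]
    collator = DataCollatorForWholeWordMask(tokenizer=tokenizer, mlm_probability=0.15)
    batch = collator(features)
    # `input_ids` now contain [MASK] spans that cover whole words, while `labels`
    # hold the original token ids at the masked positions and -100 elsewhere.
    print(tokenizer.decode(batch["input_ids"][0]))
    return batch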
transformers/examples/research_projects/mlm_wwm/run_mlm_wwm.py/0
{ "file_path": "transformers/examples/research_projects/mlm_wwm/run_mlm_wwm.py", "repo_id": "transformers", "token_count": 7241 }
72
# Sample script to finetune RAG using Ray for distributed retrieval.

# Add parent directory to python path to access lightning_base.py
export PYTHONPATH="../":"${PYTHONPATH}"

# creates the custom knowledge base
python use_own_knowledge_dataset.py \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --output_dir /DIR/SQUAD-KB

# Start a single-node Ray cluster.
ray start --head

# A sample finetuning run; you need to specify data_dir, output_dir and model_name_or_path
# run ./examples/rag/finetune_rag_ray.sh --help to see all the possible options

python finetune_rag.py \
    --data_dir /DIR/squad-training-data \
    --output_dir /DIR/model_checkpoints \
    --model_name_or_path facebook/rag-token-base \
    --model_type rag_token \
    --fp16 \
    --gpus 2 \
    --profile \
    --do_train \
    --end2end \
    --do_predict \
    --n_val -1 \
    --train_batch_size 4 \
    --eval_batch_size 1 \
    --max_source_length 128 \
    --max_target_length 25 \
    --val_max_target_length 25 \
    --test_max_target_length 25 \
    --label_smoothing 0.1 \
    --dropout 0.1 \
    --attention_dropout 0.1 \
    --weight_decay 0.001 \
    --adam_epsilon 1e-08 \
    --max_grad_norm 0.1 \
    --lr_scheduler polynomial \
    --learning_rate 3e-05 \
    --num_train_epochs 10 \
    --warmup_steps 500 \
    --gradient_accumulation_steps 8 \
    --distributed_retriever ray \
    --num_retrieval_workers 4 \
    --passages_path /DIR/SQUAD-KB/my_knowledge_dataset \
    --index_path /DIR/SQUAD-KB/my_knowledge_dataset_hnsw_index.faiss \
    --index_name custom \
    --context_encoder_name facebook/dpr-ctx_encoder-multiset-base \
    --csv_path /DIR/SQUAD-KB/squad-kb.csv \
    --index_gpus 1 \
    --gpu_order [5,6,7,8,9,0,1,2,3,4] \
    --shard_dir ./test_dir/kb-shards \
    --indexing_freq 500

# Stop the Ray cluster.
ray stop

# This script was used to test on the SQuAD data.
# Change the dir parameters according to your preference.
# Please use the same device order when running CUDA_VISIBLE_DEVICES=5,6,7,8,9,0,1,2,3,4 sh finetune_rag_ray_end2end.sh
transformers/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh/0
{ "file_path": "transformers/examples/research_projects/rag-end2end-retriever/finetune_rag_ray_end2end.sh", "repo_id": "transformers", "token_count": 876 }
73
#!/usr/bin/env python import os from pathlib import Path from typing import Dict, List import fire import torch from transformers import AutoModelForSeq2SeqLM, AutoTokenizer from transformers.utils.logging import get_logger logger = get_logger(__name__) def remove_prefix(text: str, prefix: str): if text.startswith(prefix): return text[len(prefix) :] return text # or whatever def sanitize(sd): return {remove_prefix(k, "model."): v for k, v in sd.items()} def average_state_dicts(state_dicts: List[Dict[str, torch.Tensor]]): new_sd = {} for k in state_dicts[0].keys(): tensors = [sd[k] for sd in state_dicts] new_t = sum(tensors) / len(tensors) assert isinstance(new_t, torch.Tensor) new_sd[k] = new_t return new_sd def convert_pl_to_hf(pl_ckpt_path: str, hf_src_model_dir: str, save_path: str) -> None: """Cleanup a pytorch-lightning .ckpt file or experiment dir and save a huggingface model with that state dict. Silently allows extra pl keys (like teacher.) Puts all ckpt models into CPU RAM at once! Args: pl_ckpt_path (:obj:`str`): Path to a .ckpt file saved by pytorch_lightning or dir containing ckpt files. If a directory is passed, all .ckpt files inside it will be averaged! hf_src_model_dir (:obj:`str`): Path to a directory containing a correctly shaped checkpoint save_path (:obj:`str`): Directory to save the new model """ hf_model = AutoModelForSeq2SeqLM.from_pretrained(hf_src_model_dir) if os.path.isfile(pl_ckpt_path): ckpt_files = [pl_ckpt_path] else: assert os.path.isdir(pl_ckpt_path) ckpt_files = list(Path(pl_ckpt_path).glob("*.ckpt")) assert ckpt_files, f"could not find any ckpt files inside the {pl_ckpt_path} directory" if len(ckpt_files) > 1: logger.info(f"averaging the weights of {ckpt_files}") state_dicts = [sanitize(torch.load(x, map_location="cpu")["state_dict"]) for x in ckpt_files] state_dict = average_state_dicts(state_dicts) missing, unexpected = hf_model.load_state_dict(state_dict, strict=False) assert not missing, f"missing keys: {missing}" hf_model.save_pretrained(save_path) try: tok = AutoTokenizer.from_pretrained(hf_src_model_dir) tok.save_pretrained(save_path) except Exception: pass # dont copy tokenizer if cant if __name__ == "__main__": fire.Fire(convert_pl_to_hf)
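# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; all paths are placeholders). `fire`
# exposes `convert_pl_to_hf` directly on the command line, so the three
# arguments are passed positionally:
#
#   python convert_pl_checkpoint_to_hf.py \
#       ./distilbart_output/checkpoints \
#       sshleifer/student_cnn_12_6 \
#       ./converted_hf_model
#
# If the first argument is a directory, every *.ckpt file inside it is loaded
# and the state dicts are averaged before saving. The same conversion is
# available from Python as:
#
#   convert_pl_to_hf("./distilbart_output/checkpoints", "sshleifer/student_cnn_12_6", "./converted_hf_model")
# ---------------------------------------------------------------------------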
transformers/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/convert_pl_checkpoint_to_hf.py", "repo_id": "transformers", "token_count": 1017 }
74
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export BS=32 export GAS=1 python finetune.py \ --learning_rate=3e-5 \ --fp16 \ --gpus 1 \ --do_train \ --do_predict \ --val_check_interval 0.25 \ --n_val 500 \ --num_train_epochs 2 \ --freeze_encoder --freeze_embeds --data_dir cnn_dm \ --max_target_length 142 --val_max_target_length=142 \ --train_batch_size=$BS --eval_batch_size=$BS --gradient_accumulation_steps=$GAS \ --model_name_or_path sshleifer/student_cnn_12_6 \ --tokenizer_name facebook/bart-large \ --warmup_steps 500 \ --output_dir distilbart-cnn-12-6 \ "$@"
transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh/0
{ "file_path": "transformers/examples/research_projects/seq2seq-distillation/train_distilbart_cnn.sh", "repo_id": "transformers", "token_count": 292 }
75
# VisualBERT Demo

This demo shows usage of the VisualBERT VQA model and is adapted from the LXMERT demo available [here](https://github.com/huggingface/transformers/blob/main/examples/research_projects/lxmert/demo.ipynb).

1. Make a virtualenv: ``virtualenv venv`` and activate it: ``source venv/bin/activate``
2. Install the requirements: ``pip install -r ./requirements.txt``
3. Usage is shown in ``demo.ipynb``.
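If you just want a quick sanity check of the VQA head outside the notebook, a minimal forward pass might look like the sketch below. The random `visual_embeds` merely stand in for the 2048-dimensional Faster R-CNN region features that the demo extracts, and the checkpoint/tokenizer names are assumptions, so treat the output as a smoke test rather than a real prediction:

```python
import torch
from transformers import AutoTokenizer, VisualBertForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = VisualBertForQuestionAnswering.from_pretrained("uclanlp/visualbert-vqa")

inputs = tokenizer("What is the man holding?", return_tensors="pt")
visual_embeds = torch.randn(1, 36, 2048)  # placeholder for detector region features
inputs.update(
    {
        "visual_embeds": visual_embeds,
        "visual_token_type_ids": torch.ones(visual_embeds.shape[:-1], dtype=torch.long),
        "visual_attention_mask": torch.ones(visual_embeds.shape[:-1], dtype=torch.float),
    }
)

with torch.no_grad():
    logits = model(**inputs).logits  # scores over the VQA answer vocabulary
print(logits.argmax(-1))
```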
transformers/examples/research_projects/visual_bert/README.md/0
{ "file_path": "transformers/examples/research_projects/visual_bert/README.md", "repo_id": "transformers", "token_count": 127 }
76
# Parts of the code are adapted from the snippets provided in the TorchAudio Wav2Vec forced alignment tutorial. # The full tutorial can be found here: https://pytorch.org/audio/stable/tutorials/forced_alignment_tutorial.html import argparse import os from dataclasses import dataclass import torch import torchaudio from tqdm import tqdm from transformers import AutoConfig, AutoModelForCTC, AutoProcessor class Wav2Vec2Aligner: def __init__(self, model_name, input_wavs_sr, cuda): self.cuda = cuda self.config = AutoConfig.from_pretrained(model_name) self.model = AutoModelForCTC.from_pretrained(model_name) self.model.eval() if self.cuda: self.model.to(device="cuda") self.processor = AutoProcessor.from_pretrained(model_name) self.resampler = torchaudio.transforms.Resample(input_wavs_sr, 16_000) blank_id = 0 vocab = list(self.processor.tokenizer.get_vocab().keys()) for i in range(len(vocab)): if vocab[i] == "[PAD]" or vocab[i] == "<pad>": blank_id = i print("Blank Token id [PAD]/<pad>", blank_id) self.blank_id = blank_id def speech_file_to_array_fn(self, wav_path): speech_array, sampling_rate = torchaudio.load(wav_path) speech = self.resampler(speech_array).squeeze().numpy() return speech def align_single_sample(self, item): blank_id = self.blank_id transcript = "|".join(item["sent"].split(" ")) if not os.path.isfile(item["wav_path"]): print(item["wav_path"], "not found in wavs directory") speech_array = self.speech_file_to_array_fn(item["wav_path"]) inputs = self.processor(speech_array, sampling_rate=16_000, return_tensors="pt", padding=True) if self.cuda: inputs = inputs.to(device="cuda") with torch.no_grad(): logits = self.model(inputs.input_values).logits # get the emission probability at frame level emissions = torch.log_softmax(logits, dim=-1) emission = emissions[0].cpu().detach() # get labels from vocab labels = ([""] + list(self.processor.tokenizer.get_vocab().keys()))[ :-1 ] # logits don't align with the tokenizer's vocab dictionary = {c: i for i, c in enumerate(labels)} tokens = [] for c in transcript: if c in dictionary: tokens.append(dictionary[c]) def get_trellis(emission, tokens, blank_id=0): """ Build a trellis matrix of shape (num_frames + 1, num_tokens + 1) that represents the probabilities of each source token being at a certain time step """ num_frames = emission.size(0) num_tokens = len(tokens) # Trellis has extra diemsions for both time axis and tokens. # The extra dim for tokens represents <SoS> (start-of-sentence) # The extra dim for time axis is for simplification of the code. trellis = torch.full((num_frames + 1, num_tokens + 1), -float("inf")) trellis[:, 0] = 0 for t in range(num_frames): trellis[t + 1, 1:] = torch.maximum( # Score for staying at the same token trellis[t, 1:] + emission[t, blank_id], # Score for changing to the next token trellis[t, :-1] + emission[t, tokens], ) return trellis trellis = get_trellis(emission, tokens, blank_id) @dataclass class Point: token_index: int time_index: int score: float def backtrack(trellis, emission, tokens, blank_id=0): """ Walk backwards from the last (sentence_token, time_step) pair to build the optimal sequence alignment path """ # Note: # j and t are indices for trellis, which has extra dimensions # for time and tokens at the beginning. # When referring to time frame index `T` in trellis, # the corresponding index in emission is `T-1`. # Similarly, when referring to token index `J` in trellis, # the corresponding index in transcript is `J-1`. 
j = trellis.size(1) - 1 t_start = torch.argmax(trellis[:, j]).item() path = [] for t in range(t_start, 0, -1): # 1. Figure out if the current position was stay or change # Note (again): # `emission[J-1]` is the emission at time frame `J` of trellis dimension. # Score for token staying the same from time frame J-1 to T. stayed = trellis[t - 1, j] + emission[t - 1, blank_id] # Score for token changing from C-1 at T-1 to J at T. changed = trellis[t - 1, j - 1] + emission[t - 1, tokens[j - 1]] # 2. Store the path with frame-wise probability. prob = emission[t - 1, tokens[j - 1] if changed > stayed else 0].exp().item() # Return token index and time index in non-trellis coordinate. path.append(Point(j - 1, t - 1, prob)) # 3. Update the token if changed > stayed: j -= 1 if j == 0: break else: raise ValueError("Failed to align") return path[::-1] path = backtrack(trellis, emission, tokens, blank_id) @dataclass class Segment: label: str start: int end: int score: float def __repr__(self): return f"{self.label}\t{self.score:4.2f}\t{self.start*20:5d}\t{self.end*20:5d}" @property def length(self): return self.end - self.start def merge_repeats(path): """ Merge repeated tokens into a single segment. Note: this shouldn't affect repeated characters from the original sentences (e.g. `ll` in `hello`) """ i1, i2 = 0, 0 segments = [] while i1 < len(path): while i2 < len(path) and path[i1].token_index == path[i2].token_index: i2 += 1 score = sum(path[k].score for k in range(i1, i2)) / (i2 - i1) segments.append( Segment( transcript[path[i1].token_index], path[i1].time_index, path[i2 - 1].time_index + 1, score, ) ) i1 = i2 return segments segments = merge_repeats(path) with open(item["out_path"], "w") as out_align: for seg in segments: out_align.write(str(seg) + "\n") def align_data(self, wav_dir, text_file, output_dir): if not os.path.exists(output_dir): os.makedirs(output_dir) # load text file lines = open(text_file, encoding="utf8").readlines() items = [] for line in lines: if len(line.strip().split("\t")) != 2: print("Script must be in format: 00001 this is my sentence") exit() wav_name, sentence = line.strip().split("\t") wav_path = os.path.join(wav_dir, wav_name + ".wav") out_path = os.path.join(output_dir, wav_name + ".txt") items.append({"sent": sentence, "wav_path": wav_path, "out_path": out_path}) print("Number of samples found in script file", len(items)) for item in tqdm(items): self.align_single_sample(item) def main(): parser = argparse.ArgumentParser() parser.add_argument( "--model_name", type=str, default="arijitx/wav2vec2-xls-r-300m-bengali", help="wav2vec model name" ) parser.add_argument("--wav_dir", type=str, default="./wavs", help="directory containing wavs") parser.add_argument("--text_file", type=str, default="script.txt", help="file containing text") parser.add_argument("--input_wavs_sr", type=int, default=16000, help="sampling rate of input audios") parser.add_argument( "--output_dir", type=str, default="./out_alignment", help="output directory containing the alignment files" ) parser.add_argument("--cuda", action="store_true") args = parser.parse_args() aligner = Wav2Vec2Aligner(args.model_name, args.input_wavs_sr, args.cuda) aligner.align_data(args.wav_dir, args.text_file, args.output_dir) if __name__ == "__main__": main()
transformers/examples/research_projects/wav2vec2/alignment.py/0
{ "file_path": "transformers/examples/research_projects/wav2vec2/alignment.py", "repo_id": "transformers", "token_count": 4170 }
77
<!---
Copyright 2022 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->

# XTREME-S benchmark examples

*Maintainers: [Anton Lozhkov](https://github.com/anton-l) and [Patrick von Platen](https://github.com/patrickvonplaten)*

The Cross-lingual TRansfer Evaluation of Multilingual Encoders for Speech (XTREME-S) benchmark is designed to evaluate speech representations across languages, tasks, domains and data regimes. It covers 102 typologically diverse languages and seven downstream tasks grouped in four families: speech recognition, translation, classification and retrieval.

XTREME-S covers speech recognition with Fleurs, Multilingual LibriSpeech (MLS) and VoxPopuli, speech translation with CoVoST-2, speech classification with LangID (Fleurs) and intent classification (MInds-14) and finally speech(-text) retrieval with Fleurs. Each of the tasks covers a subset of the 102 languages included in XTREME-S (shown here with their ISO 639-3 codes): afr, amh, ara, asm, ast, azj, bel, ben, bos, cat, ceb, ces, cmn, cym, dan, deu, ell, eng, spa, est, fas, ful, fin, tgl, fra, gle, glg, guj, hau, heb, hin, hrv, hun, hye, ind, ibo, isl, ita, jpn, jav, kat, kam, kea, kaz, khm, kan, kor, ckb, kir, ltz, lug, lin, lao, lit, luo, lav, mri, mkd, mal, mon, mar, msa, mlt, mya, nob, npi, nld, nso, nya, oci, orm, ory, pan, pol, pus, por, ron, rus, bul, snd, slk, slv, sna, som, srp, swe, swh, tam, tel, tgk, tha, tur, ukr, umb, urd, uzb, vie, wol, xho, yor, yue and zul.

Paper: [XTREME-S: Evaluating Cross-lingual Speech Representations](https://arxiv.org/abs/2203.10752)

Dataset: [https://huggingface.co/datasets/google/xtreme_s](https://huggingface.co/datasets/google/xtreme_s)

## Fine-tuning for the XTREME-S tasks

Based on the [`run_xtreme_s.py`](https://github.com/huggingface/transformers/blob/main/examples/research_projects/xtreme-s/run_xtreme_s.py) script.

This script can fine-tune any of the pretrained speech models on the [hub](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition) on the [XTREME-S dataset](https://huggingface.co/datasets/google/xtreme_s) tasks.

XTREME-S is made up of 7 different tasks. Here is how to run the script on each of them:

```bash
export TASK_NAME=mls.all

python run_xtreme_s.py \
    --model_name_or_path="facebook/wav2vec2-xls-r-300m" \
    --task="${TASK_NAME}" \
    --output_dir="xtreme_s_xlsr_${TASK_NAME}" \
    --num_train_epochs=100 \
    --per_device_train_batch_size=32 \
    --learning_rate="3e-4" \
    --target_column_name="transcription" \
    --save_steps=500 \
    --eval_steps=500 \
    --gradient_checkpointing \
    --fp16 \
    --group_by_length \
    --do_train \
    --do_eval \
    --do_predict \
    --push_to_hub
```

where `TASK_NAME` can be one of: `mls, voxpopuli, covost2, fleurs-asr, fleurs-lang_id, minds14`.

We get the following results on the test set of the benchmark's datasets.
The corresponding training commands for each dataset are given in the sections below: | Task | Dataset | Result | Fine-tuned model & logs | Training time | GPUs | |-----------------------|-----------|-----------------------|--------------------------------------------------------------------|---------------|--------| | Speech Recognition | MLS | 30.33 WER | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_mls/) | 18:47:25 | 8xV100 | | Speech Recognition | VoxPopuli | - | - | - | - | | Speech Recognition | FLEURS | - | - | - | - | | Speech Translation | CoVoST-2 | - | - | - | - | | Speech Classification | Minds-14 | 90.15 F1 / 90.33 Acc. | [here](https://huggingface.co/anton-l/xtreme_s_xlsr_300m_minds14/) | 2:54:21 | 2xA100 | | Speech Classification | FLEURS | - | - | - | - | | Speech Retrieval | FLEURS | - | - | - | - | ### Speech Recognition with MLS The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#multilingual-librispeech-mls) using 8 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=8 \ run_xtreme_s.py \ --task="mls" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_mls" \ --overwrite_output_dir \ --num_train_epochs=100 \ --per_device_train_batch_size=4 \ --per_device_eval_batch_size=1 \ --gradient_accumulation_steps=2 \ --learning_rate="3e-4" \ --warmup_steps=3000 \ --eval_strategy="steps" \ --max_duration_in_seconds=20 \ --save_steps=500 \ --eval_steps=500 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="wer" \ --greater_is_better=False \ --load_best_model_at_end \ --push_to_hub ``` On 8 V100 GPUs, this script should run in ~19 hours and yield a cross-entropy loss of **0.6215** and word error rate of **30.33** ### Speech Classification with Minds-14 The following command shows how to fine-tune the [XLS-R](https://huggingface.co/docs/transformers/main/model_doc/xls_r) model on [XTREME-S MLS](https://huggingface.co/datasets/google/xtreme_s#intent-classification---minds-14) using 2 GPUs in half-precision. ```bash python -m torch.distributed.launch \ --nproc_per_node=2 \ run_xtreme_s.py \ --task="minds14" \ --language="all" \ --model_name_or_path="facebook/wav2vec2-xls-r-300m" \ --output_dir="xtreme_s_xlsr_300m_minds14" \ --overwrite_output_dir \ --num_train_epochs=50 \ --per_device_train_batch_size=32 \ --per_device_eval_batch_size=8 \ --gradient_accumulation_steps=1 \ --learning_rate="3e-4" \ --warmup_steps=1500 \ --eval_strategy="steps" \ --max_duration_in_seconds=30 \ --save_steps=200 \ --eval_steps=200 \ --logging_steps=1 \ --layerdrop=0.0 \ --mask_time_prob=0.3 \ --mask_time_length=10 \ --mask_feature_prob=0.1 \ --mask_feature_length=64 \ --freeze_feature_encoder \ --gradient_checkpointing \ --fp16 \ --group_by_length \ --do_train \ --do_eval \ --do_predict \ --metric_for_best_model="f1" \ --greater_is_better=True \ --load_best_model_at_end \ --push_to_hub ``` On 2 A100 GPUs, this script should run in ~5 hours and yield a cross-entropy loss of **0.4119** and F1 score of **90.15**
transformers/examples/research_projects/xtreme-s/README.md/0
{ "file_path": "transformers/examples/research_projects/xtreme-s/README.md", "repo_id": "transformers", "token_count": 3399 }
78
#!/usr/bin/env python # coding=utf-8 # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for summarization. """ # You can also adapt this script on your own sequence to sequence task. Pointers for this are left as comments. import json import logging import os import sys from dataclasses import dataclass, field from typing import Optional import datasets import evaluate import nltk # Here to have a nice missing dependency error message early on import numpy as np import tensorflow as tf from datasets import load_dataset from filelock import FileLock import transformers from transformers import ( AutoConfig, AutoTokenizer, DataCollatorForSeq2Seq, HfArgumentParser, KerasMetricCallback, PushToHubCallback, TFAutoModelForSeq2SeqLM, TFTrainingArguments, create_optimizer, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, is_offline_mode, send_example_telemetry from transformers.utils.versions import require_version # region Checking dependencies # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.49.0.dev0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/summarization/requirements.txt") logger = logging.getLogger(__name__) try: nltk.data.find("tokenizers/punkt") except (LookupError, OSError): if is_offline_mode(): raise LookupError( "Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files" ) with FileLock(".lock") as lock: nltk.download("punkt", quiet=True) # endregion # region Arguments @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where to store the pretrained models downloaded from huggingface.co"}, ) use_fast_tokenizer: bool = field( default=True, metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) token: str = field( default=None, metadata={ "help": ( "The token to use as HTTP bearer authorization for remote files. If not specified, will use the token " "generated when running `huggingface-cli login` (stored in `~/.huggingface`)." 
) }, ) trust_remote_code: bool = field( default=False, metadata={ "help": ( "Whether to trust the execution of code from datasets/models defined on the Hub." " This option should only be set to `True` for repositories you trust and in which you have read the" " code, as it will execute code present on the Hub on your local machine." ) }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) text_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the full texts (for summarization)."}, ) summary_column: Optional[str] = field( default=None, metadata={"help": "The name of the column in the datasets containing the summaries (for summarization)."}, ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a jsonlines or csv file)."} ) validation_file: Optional[str] = field( default=None, metadata={ "help": ( "An optional input evaluation data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." ) }, ) test_file: Optional[str] = field( default=None, metadata={ "help": "An optional input test data file to evaluate the metrics (rouge) on (a jsonlines or csv file)." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) max_source_length: Optional[int] = field( default=1024, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) max_target_length: Optional[int] = field( default=128, metadata={ "help": ( "The maximum total sequence length for target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) val_max_target_length: Optional[int] = field( default=None, metadata={ "help": ( "The maximum total sequence length for validation target text after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. " "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used " "during ``evaluate`` and ``predict``." ) }, ) pad_to_max_length: bool = field( default=False, metadata={ "help": ( "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) }, ) max_predict_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of prediction examples to this " "value if set." ) }, ) num_beams: Optional[int] = field( default=1, metadata={ "help": ( "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, " "which is used during ``evaluate`` and ``predict``." ) }, ) ignore_pad_token_for_loss: bool = field( default=True, metadata={ "help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not." }, ) source_prefix: Optional[str] = field( default=None, metadata={"help": "A prefix to add before every source text (useful for T5 models)."} ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if self.val_max_target_length is None: self.val_max_target_length = self.max_target_length # endregion # region Dataset name mappings summarization_name_mapping = { "amazon_reviews_multi": ("review_body", "review_title"), "big_patent": ("description", "abstract"), "cnn_dailymail": ("article", "highlights"), "orange_sum": ("text", "summary"), "pn_summary": ("article", "summary"), "psc": ("extract_text", "summary_text"), "samsum": ("dialogue", "summary"), "thaisum": ("body", "summary"), "xglue": ("news_body", "news_title"), "xsum": ("document", "summary"), "wiki_summary": ("article", "highlights"), "multi_news": ("document", "summary"), } # endregion def main(): # region Argument parsing # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. 
send_example_telemetry("run_summarization", model_args, data_args, framework="tensorflow") # endregion # region Logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO) datasets.utils.logging.set_verbosity(logging.INFO) transformers.utils.logging.set_verbosity(logging.INFO) # Log on each process the small summary: logger.info(f"Training/evaluation parameters {training_args}") # endregion # region T5 special-casing if data_args.source_prefix is None and model_args.model_name_or_path in [ "google-t5/t5-small", "google-t5/t5-base", "google-t5/t5-large", "google-t5/t5-3b", "google-t5/t5-11b", ]: logger.warning( "You're running a t5 model but didn't provide a source prefix, which is the expected, e.g. with " "`--source_prefix 'summarize: ' `" ) # endregion # region Detecting last checkpoint last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # endregion # Set seed before initializing model. set_seed(training_args.seed) # region Load datasets # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files this script will use the first column for the full texts and the second column for the # summaries (unless you specify column names for this with the `text_column` and `summary_column` arguments). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset( data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) else: data_files = {} if data_args.train_file is not None: data_files["train"] = data_args.train_file extension = data_args.train_file.split(".")[-1] if data_args.validation_file is not None: data_files["validation"] = data_args.validation_file extension = data_args.validation_file.split(".")[-1] if data_args.test_file is not None: data_files["test"] = data_args.test_file extension = data_args.test_file.split(".")[-1] raw_datasets = load_dataset( extension, data_files=data_files, cache_dir=model_args.cache_dir, token=model_args.token, ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets. 
# endregion # region Load model config and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) prefix = data_args.source_prefix if data_args.source_prefix is not None else "" # endregion # region Dataset preprocessing # We need to tokenize inputs and targets. if training_args.do_train: column_names = raw_datasets["train"].column_names elif training_args.do_eval: column_names = raw_datasets["validation"].column_names else: logger.info("There is nothing to do. Please pass `do_train`, and/or `do_eval`.") return # Get the column names for input/target. dataset_columns = summarization_name_mapping.get(data_args.dataset_name, None) if data_args.text_column is None: text_column = dataset_columns[0] if dataset_columns is not None else column_names[0] else: text_column = data_args.text_column if text_column not in column_names: raise ValueError( f"--text_column' value '{data_args.text_column}' needs to be one of: {', '.join(column_names)}" ) if data_args.summary_column is None: summary_column = dataset_columns[1] if dataset_columns is not None else column_names[1] else: summary_column = data_args.summary_column if summary_column not in column_names: raise ValueError( f"--summary_column' value '{data_args.summary_column}' needs to be one of: {', '.join(column_names)}" ) # Temporarily set max_target_length for training. max_target_length = data_args.max_target_length padding = "max_length" if data_args.pad_to_max_length else False def preprocess_function(examples): inputs = examples[text_column] targets = examples[summary_column] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=data_args.max_source_length, padding=padding, truncation=True) # Tokenize targets with the `text_target` keyword argument labels = tokenizer(text_target=targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. 
if padding == "max_length" and data_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs if training_args.do_train: if "train" not in raw_datasets: raise ValueError("--do_train requires a train dataset") train_dataset = raw_datasets["train"] if data_args.max_train_samples is not None: max_train_samples = min(len(train_dataset), data_args.max_train_samples) train_dataset = train_dataset.select(range(max_train_samples)) train_dataset = train_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on train dataset", ) else: train_dataset = None if training_args.do_eval: max_target_length = data_args.val_max_target_length if "validation" not in raw_datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = raw_datasets["validation"] if data_args.max_eval_samples is not None: max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples) eval_dataset = eval_dataset.select(range(max_eval_samples)) eval_dataset = eval_dataset.map( preprocess_function, batched=True, num_proc=data_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on validation dataset", ) else: eval_dataset = None # endregion # region Text preprocessing def postprocess_text(preds, labels): preds = [pred.strip() for pred in preds] labels = [label.strip() for label in labels] # rougeLSum expects newline after each sentence preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds] labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels] return preds, labels # endregion with training_args.strategy.scope(): # region Prepare model model = TFAutoModelForSeq2SeqLM.from_pretrained( model_args.model_name_or_path, config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, token=model_args.token, trust_remote_code=model_args.trust_remote_code, ) # We resize the embeddings only when necessary to avoid index errors. If you are creating a model from scratch # on a small vocab and want a smaller embedding size, remove this test. embeddings = model.get_input_embeddings() # Matt: This is a temporary workaround as we transition our models to exclusively using Keras embeddings. # As soon as the transition is complete, all embeddings should be keras.Embeddings layers, and # the weights will always be in embeddings.embeddings. 
if hasattr(embeddings, "embeddings"): embedding_size = embeddings.embeddings.shape[0] else: embedding_size = embeddings.weight.shape[0] if len(tokenizer) > embedding_size: model.resize_token_embeddings(len(tokenizer)) # endregion # region Prepare TF Dataset objects if model.config.decoder_start_token_id is None: raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined") label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id data_collator = DataCollatorForSeq2Seq( tokenizer, model=model, label_pad_token_id=label_pad_token_id, pad_to_multiple_of=128, # Reduce the number of unique shapes for XLA, especially for generation return_tensors="np", ) dataset_options = tf.data.Options() dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF num_replicas = training_args.strategy.num_replicas_in_sync total_train_batch_size = training_args.per_device_train_batch_size * num_replicas total_eval_batch_size = training_args.per_device_eval_batch_size * num_replicas # model.prepare_tf_dataset() wraps a Hugging Face dataset in a tf.data.Dataset which is ready to use in # training. This is the recommended way to use a Hugging Face dataset when training with Keras. You can also # use the lower-level dataset.to_tf_dataset() method, but you will have to specify things like column names # yourself if you use this method, whereas they are automatically inferred from the model input names when # using model.prepare_tf_dataset() # For more info see the docs: # https://huggingface.co/docs/transformers/main/en/main_classes/model#transformers.TFPreTrainedModel.prepare_tf_dataset # https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.Dataset.to_tf_dataset tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=data_collator, batch_size=total_train_batch_size, shuffle=True, ).with_options(dataset_options) tf_eval_dataset = model.prepare_tf_dataset( eval_dataset, collate_fn=data_collator, batch_size=total_eval_batch_size, shuffle=False, ).with_options(dataset_options) # endregion # region Optimizer, loss and LR scheduling num_train_steps = int(len(tf_train_dataset) * training_args.num_train_epochs) if training_args.warmup_steps > 0: num_warmup_steps = training_args.warmup_steps elif training_args.warmup_ratio > 0: num_warmup_steps = int(num_train_steps * training_args.warmup_ratio) else: num_warmup_steps = 0 if training_args.do_train: optimizer, lr_schedule = create_optimizer( init_lr=training_args.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, adam_beta1=training_args.adam_beta1, adam_beta2=training_args.adam_beta2, adam_epsilon=training_args.adam_epsilon, weight_decay_rate=training_args.weight_decay, adam_global_clipnorm=training_args.max_grad_norm, ) else: optimizer = "sgd" # Just write anything because we won't be using it # endregion # region Metric and KerasMetricCallback if training_args.do_eval: metric = evaluate.load("rouge", cache_dir=model_args.cache_dir) if data_args.val_max_target_length is None: data_args.val_max_target_length = data_args.max_target_length gen_kwargs = { "max_length": data_args.val_max_target_length if data_args is not None else config.max_length, "num_beams": data_args.num_beams, "no_repeat_ngram_size": 0, # Not supported under XLA right now, and some models set it by default } def compute_metrics(preds): predictions, labels = preds if isinstance(predictions, tuple): predictions = predictions[0] 
decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metrics = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True) # Only print the mid f-measures, but there are a lot of other statistics in there too! metrics = {key: round(val.mid.fmeasure * 100, 4) for key, val in metrics.items()} return metrics # The KerasMetricCallback allows metrics that are too complex to write as standard Keras metrics # to be computed each epoch. Any Python code can be included in the metric_fn. This is especially # useful for metrics like BLEU and ROUGE that perform string comparisons on decoded model outputs. # For more information, see the docs at # https://huggingface.co/docs/transformers/main_classes/keras_callbacks#transformers.KerasMetricCallback metric_callback = KerasMetricCallback( metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, predict_with_generate=True, use_xla_generation=True, generate_kwargs=gen_kwargs, ) callbacks = [metric_callback] else: callbacks = [] # endregion # region Preparing push_to_hub and model card push_to_hub_model_id = training_args.push_to_hub_model_id model_name = model_args.model_name_or_path.split("/")[-1] if not push_to_hub_model_id: if data_args.dataset_name is not None: push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}" else: push_to_hub_model_id = f"{model_name}-finetuned-summarization" model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "summarization"} if data_args.dataset_name is not None: model_card_kwargs["dataset_tags"] = data_args.dataset_name if data_args.dataset_config_name is not None: model_card_kwargs["dataset_args"] = data_args.dataset_config_name model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}" else: model_card_kwargs["dataset"] = data_args.dataset_name if training_args.push_to_hub: # Because this training can be quite long, we save once per epoch. callbacks.append( PushToHubCallback( output_dir=training_args.output_dir, hub_model_id=push_to_hub_model_id, hub_token=training_args.push_to_hub_token, tokenizer=tokenizer, **model_card_kwargs, ) ) # endregion # region Training # Transformers models compute the right loss for their task by default when labels are passed, and will # use this for training unless you specify your own loss function in compile(). model.compile(optimizer=optimizer, jit_compile=training_args.xla) eval_metrics = None if training_args.do_train: logger.info("***** Running training *****") logger.info(f" Num examples = {len(train_dataset)}") logger.info(f" Num Epochs = {training_args.num_train_epochs}") logger.info(f" Instantaneous batch size per device = {training_args.per_device_train_batch_size}") logger.info(f" Total train batch size = {total_train_batch_size}") logger.info(f" Total optimization steps = {num_train_steps}") if training_args.xla and not data_args.pad_to_max_length: logger.warning( "XLA training may be slow at first when --pad_to_max_length is not set " "until all possible shapes have been compiled." 
) history = model.fit(tf_train_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks) eval_metrics = {key: val[-1] for key, val in history.history.items()} # endregion # region Validation if training_args.do_eval and not training_args.do_train: # Do a standalone evaluation run logger.info("Evaluation...") # Compiling generation with XLA yields enormous speedups, see https://huggingface.co/blog/tf-xla-generate @tf.function(jit_compile=True) def generate(**kwargs): return model.generate(**kwargs) for batch, labels in tf_eval_dataset: batch.update(gen_kwargs) generated_tokens = generate(**batch) if isinstance(generated_tokens, tuple): generated_tokens = generated_tokens[0] decoded_preds = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True) labels = np.where(labels != -100, labels, tokenizer.pad_token_id) decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True) decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels) metric.add_batch(predictions=decoded_preds, references=decoded_labels) eval_metrics = metric.compute(use_stemmer=True) result = {key: round(val.mid.fmeasure * 100, 4) for key, val in eval_metrics.items()} logger.info(result) # endregion if training_args.output_dir is not None and eval_metrics is not None: output_eval_file = os.path.join(training_args.output_dir, "all_results.json") with open(output_eval_file, "w") as writer: writer.write(json.dumps(eval_metrics)) if training_args.output_dir is not None and not training_args.push_to_hub: # If we're not pushing to hub, at least save a local copy when we're done model.save_pretrained(training_args.output_dir) if __name__ == "__main__": main()
transformers/examples/tensorflow/summarization/run_summarization.py/0
{ "file_path": "transformers/examples/tensorflow/summarization/run_summarization.py", "repo_id": "transformers", "token_count": 13633 }
79
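The training script above writes the fine-tuned checkpoint to training_args.output_dir (or pushes it to the Hub). As a quick illustration of reloading such a checkpoint for inference, here is a minimal sketch; the ./summarization_output directory, the "summarize:" prefix (only meaningful for T5-style models) and the generation settings are assumptions, not values taken from the script.

# Minimal TF inference sketch for a checkpoint produced by the script above.
# Directory name, prompt prefix and generation settings are illustrative assumptions.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

checkpoint_dir = "./summarization_output"  # hypothetical output_dir used during training
tokenizer = AutoTokenizer.from_pretrained(checkpoint_dir)
model = TFAutoModelForSeq2SeqLM.from_pretrained(checkpoint_dir)

article = "summarize: The tower is 324 metres tall, about the same height as an 81-storey building."
inputs = tokenizer(article, return_tensors="tf", truncation=True, max_length=512)
summary_ids = model.generate(**inputs, max_length=64, num_beams=4)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))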
import torch from transformers import pipeline, AutoTokenizer, AutoModel, AutoModelForMaskedLM import time test_sentence = 'Do you [MASK] the muffin man?' # for comparison bert = pipeline('fill-mask', model = 'bert-base-uncased') print('\n'.join([d['sequence'] for d in bert(test_sentence)])) deberta = pipeline('fill-mask', model = 'microsoft/deberta-v3-base', model_kwargs={"legacy": False}) print('\n'.join([d['sequence'] for d in deberta(test_sentence)])) tokenizer = AutoTokenizer.from_pretrained("microsoft/deberta-v3-base") tokenized_dict = tokenizer( ["Is this working",], ["Not yet",], return_tensors="pt" ) deberta.model.forward = torch.compile(deberta.model.forward) start=time.time() deberta.model(**tokenized_dict) end=time.time() print(end-start) start=time.time() deberta.model(**tokenized_dict) end=time.time() print(end-start) start=time.time() deberta.model(**tokenized_dict) end=time.time() print(end-start) model = AutoModel.from_pretrained('microsoft/deberta-base') model.config.return_dict = False model.config.output_hidden_states=False input_tuple = (tokenized_dict['input_ids'], tokenized_dict['attention_mask']) start=time.time() traced_model = torch.jit.trace(model, input_tuple) end=time.time() print(end-start) start=time.time() traced_model(tokenized_dict['input_ids'], tokenized_dict['attention_mask']) end=time.time() print(end-start) start=time.time() traced_model(tokenized_dict['input_ids'], tokenized_dict['attention_mask']) end=time.time() print(end-start) start=time.time() traced_model(tokenized_dict['input_ids'], tokenized_dict['attention_mask']) end=time.time() print(end-start) start=time.time() traced_model(tokenized_dict['input_ids'], tokenized_dict['attention_mask']) end=time.time() print(end-start) torch.jit.save(traced_model, "compiled_deberta.pt") # my_script_module = torch.jit.script(model)
transformers/scripts/deberta_scrtipt.py/0
{ "file_path": "transformers/scripts/deberta_scrtipt.py", "repo_id": "transformers", "token_count": 695 }
80
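The benchmarking script above repeats the same start/end timing block many times to compare eager execution, torch.compile and torch.jit.trace latencies. A small helper like the sketch below could factor that out; time.perf_counter, the warm-up loop and the argument names are generic patterns assumed here, not part of the original script.

import time
import torch

def benchmark(fn, *args, warmup=2, iters=5, **kwargs):
    """Return (best, mean) wall-clock latency in seconds for fn(*args, **kwargs)."""
    with torch.no_grad():
        for _ in range(warmup):  # warm-up iterations absorb compilation/tracing overhead
            fn(*args, **kwargs)
        timings = []
        for _ in range(iters):
            start = time.perf_counter()
            fn(*args, **kwargs)
            timings.append(time.perf_counter() - start)
    return min(timings), sum(timings) / len(timings)

# Hypothetical usage with the objects defined in the script above:
# best, mean = benchmark(traced_model, tokenized_dict["input_ids"], tokenized_dict["attention_mask"])
# print(f"traced model: best={best:.4f}s mean={mean:.4f}s")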
# Copyright 2021 The HuggingFace Team, the AllenNLP library authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Script to close stale issue. Taken in part from the AllenNLP repository. https://github.com/allenai/allennlp. """ import os from datetime import datetime as dt import github.GithubException from github import Github LABELS_TO_EXEMPT = [ "good first issue", "good second issue", "good difficult issue", "feature request", "new model", "wip", ] def main(): g = Github(os.environ["GITHUB_TOKEN"]) repo = g.get_repo("huggingface/transformers") open_issues = repo.get_issues(state="open") for i, issue in enumerate(open_issues): print(i, issue) comments = sorted(list(issue.get_comments()), key=lambda i: i.created_at, reverse=True) last_comment = comments[0] if len(comments) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at.replace(tzinfo=None)).days > 7 and (dt.utcnow() - issue.created_at.replace(tzinfo=None)).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.") try: issue.edit(state="closed") except github.GithubException as e: print("Couldn't close the issue:", repr(e)) elif ( (dt.utcnow() - issue.updated_at.replace(tzinfo=None)).days > 23 and (dt.utcnow() - issue.created_at.replace(tzinfo=None)).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()) ): # print(f"Would add stale comment to {issue.number}") try: issue.create_comment( "This issue has been automatically marked as stale because it has not had " "recent activity. If you think this still needs to be addressed " "please comment on this thread.\n\nPlease note that issues that do not follow the " "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) " "are likely to be ignored." ) except github.GithubException as e: print("Couldn't create comment:", repr(e)) if __name__ == "__main__": main()
transformers/scripts/stale.py/0
{ "file_path": "transformers/scripts/stale.py", "repo_id": "transformers", "token_count": 1217 }
81
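The stale-bot script above performs destructive actions (closing issues and posting comments) as soon as it runs; the only safeguard in the file is the commented-out print statements. When adapting a script like this, a dry-run switch is a common pattern; the DRY_RUN environment variable and helper below are assumptions for illustration, not part of the script.

import os

DRY_RUN = os.environ.get("DRY_RUN", "1") == "1"  # default to the safe, read-only mode

def close_issue(issue):
    """Close a PyGithub issue, or just report what would happen when DRY_RUN is set."""
    if DRY_RUN:
        print(f"[dry-run] would close issue #{issue.number}")
    else:
        issue.edit(state="closed")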
#!/usr/bin/env python # coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..utils import cached_file # docstyle-ignore CHAT_MESSAGE_PROMPT = """ Human: <<task>> Assistant: """ DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts" PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"} def download_prompt(prompt_or_repo_id, agent_name, mode="run"): """ Downloads and caches the prompt from a repo and returns it contents (if necessary). """ if prompt_or_repo_id is None: prompt_or_repo_id = DEFAULT_PROMPTS_REPO # prompt is considered a repo ID when it does not contain any kind of space if re.search("\\s", prompt_or_repo_id) is not None: return prompt_or_repo_id prompt_file = cached_file( prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name} ) with open(prompt_file, "r", encoding="utf-8") as f: return f.read() DEFAULT_CODE_SYSTEM_PROMPT = """You will be given a task to solve, your job is to come up with a series of simple commands in Python that will perform the task. To help you, I will give you access to a set of tools that you can use. Each tool is a Python function and has a description explaining the task it performs, the inputs it expects and the outputs it returns. You should first explain which tool you will use to perform the task and for what reason, then write the code in Python. Each instruction in Python should be a simple assignment. You can print intermediate results if it makes sense to do so. In the end, use tool 'final_answer' to return your answer, its argument will be what gets returned. You can use imports in your code, but only from the following list of modules: <<authorized_imports>> Be sure to provide a 'Code:' token, else the run will fail. Tools: <<tool_descriptions>> Examples: --- Task: "Answer the question in the variable `question` about the image stored in the variable `image`. The question is in French." Thought: I will use the following tools: `translator` to translate the question into English and then `image_qa` to answer the question on the input image. Code: ```py translated_question = translator(question=question, src_lang="French", tgt_lang="English") print(f"The translated question is {translated_question}.") answer = image_qa(image=image, question=translated_question) final_answer(f"The answer is {answer}") ```<end_action> --- Task: "Identify the oldest person in the `document` and create an image showcasing the result." Thought: I will use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Code: ```py answer = document_qa(document, question="What is the oldest person?") print(f"The answer is {answer}.") image = image_generator(answer) final_answer(image) ```<end_action> --- Task: "Generate an image using the text given in the variable `caption`." 
Thought: I will use the following tool: `image_generator` to generate an image. Code: ```py image = image_generator(prompt=caption) final_answer(image) ```<end_action> --- Task: "Summarize the text given in the variable `text` and read it out loud." Thought: I will use the following tools: `summarizer` to create a summary of the input text, then `text_reader` to read it out loud. Code: ```py summarized_text = summarizer(text) print(f"Summary: {summarized_text}") audio_summary = text_reader(summarized_text) final_answer(audio_summary) ```<end_action> --- Task: "Answer the question in the variable `question` about the text in the variable `text`. Use the answer to generate an image." Thought: I will use the following tools: `text_qa` to create the answer, then `image_generator` to generate an image according to the answer. Code: ```py answer = text_qa(text=text, question=question) print(f"The answer is {answer}.") image = image_generator(answer) final_answer(image) ```<end_action> --- Task: "Caption the following `image`." Thought: I will use the following tool: `image_captioner` to generate a caption for the image. Code: ```py caption = image_captioner(image) final_answer(caption) ```<end_action> --- Above example were using tools that might not exist for you. You only have access to these Tools: <<tool_names>> Remember to make sure that variables you use are all defined. Be sure to provide a 'Code:\n```' sequence before the code and '```<end_action>' after, else you will get an error. DO NOT pass the arguments as a dict as in 'answer = ask_search_agent({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = ask_search_agent(query="What is the place where James Bond lives?")'. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ DEFAULT_REACT_JSON_SYSTEM_PROMPT = """You are an expert assistant who can solve any task using JSON tool calls. You will be given a task to solve as best you can. To do so, you have been given access to the following tools: <<tool_names>> The way you use the tools is by specifying a json blob, ending with '<end_action>'. Specifically, this json should have an `action` key (name of the tool to use) and an `action_input` key (input to the tool). The $ACTION_JSON_BLOB should only contain a SINGLE action, do NOT return a list of multiple actions. It should be formatted in json. Do not try to escape special characters. Here is the template of a valid $ACTION_JSON_BLOB: { "action": $TOOL_NAME, "action_input": $INPUT }<end_action> Make sure to have the $INPUT as a dictionary in the right format for the tool you are using, and do not put variable names as input if you can find the right values. You should ALWAYS use the following format: Thought: you should always think about one action to take. Then use the action as follows: Action: $ACTION_JSON_BLOB Observation: the result of the action ... (this Thought/Action/Observation can repeat N times, you should take several steps when needed. The $ACTION_JSON_BLOB must only use a SINGLE action at a time.) You can use the result of the previous action as input for the next action. The observation will always be a string: it can represent a file, like "image_1.jpg". Then you can use it as input for the next action. You can do it for instance as follows: Observation: "image_1.jpg" Thought: I need to transform the image that I received in the previous observation to make it green. 
Action: { "action": "image_transformer", "action_input": {"image": "image_1.jpg"} }<end_action> To provide the final answer to the task, use an action blob with "action": "final_answer" tool. It is the only way to complete the task, else you will be stuck on a loop. So your final output should look like this: Action: { "action": "final_answer", "action_input": {"answer": "insert your final answer here"} }<end_action> Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Action: { "action": "document_qa", "action_input": {"document": "document.pdf", "question": "Who is the oldest person mentioned?"} }<end_action> Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Thought: I will now generate an image showcasing the oldest person. Action: { "action": "image_generator", "action_input": {"prompt": "A portrait of John Doe, a 55-year-old man living in Canada."} }<end_action> Observation: "image.png" Thought: I will now return the generated image. Action: { "action": "final_answer", "action_input": "image.png" }<end_action> --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" Thought: I will use python code evaluator to compute the result of the operation and then return the final answer using the `final_answer` tool Action: { "action": "python_interpreter", "action_input": {"code": "5 + 3 + 1294.678"} }<end_action> Observation: 1302.678 Thought: Now that I know the result, I will now return it. Action: { "action": "final_answer", "action_input": "1302.678" }<end_action> --- Task: "Which city has the highest population , Guangzhou or Shanghai?" Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities. Action: { "action": "search", "action_input": "Population Guangzhou" }<end_action> Observation: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Thought: Now let's get the population of Shanghai using the tool 'search'. Action: { "action": "search", "action_input": "Population Shanghai" } Observation: '26 million (2019)' Thought: Now I know that Shanghai has a larger population. Let's return the result. Action: { "action": "final_answer", "action_input": "Shanghai" }<end_action> Above example were using notional tools that might not exist for you. You only have access to these tools: <<tool_descriptions>> Here are the rules you should always follow to solve your task: 1. ALWAYS provide a 'Thought:' sequence, and an 'Action:' sequence that ends with <end_action>, else you will fail. 2. Always use the right arguments for the tools. Never use variable names in the 'action_input' field, use the value instead. 3. Call a tool only when needed: do not call the search agent if you do not need information, try to solve the task yourself. 4. Never re-do a tool call that you previously did with the exact same parameters. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ DEFAULT_REACT_CODE_SYSTEM_PROMPT = """You are an expert assistant who can solve any task using code blobs. You will be given a task to solve as best you can. 
To do so, you have been given access to a list of tools: these tools are basically Python functions which you can call with code. To solve the task, you must plan forward to proceed in a series of steps, in a cycle of 'Thought:', 'Code:', and 'Observation:' sequences. At each step, in the 'Thought:' sequence, you should first explain your reasoning towards solving the task and the tools that you want to use. Then in the 'Code:' sequence, you should write the code in simple Python. The code sequence must end with '<end_action>' sequence. During each intermediate step, you can use 'print()' to save whatever important information you will then need. These print outputs will then appear in the 'Observation:' field, which will be available as input for the next step. In the end you have to return a final answer using the `final_answer` tool. Here are a few examples using notional tools: --- Task: "Generate an image of the oldest person in this document." Thought: I will proceed step by step and use the following tools: `document_qa` to find the oldest person in the document, then `image_generator` to generate an image according to the answer. Code: ```py answer = document_qa(document=document, question="Who is the oldest person mentioned?") print(answer) ```<end_action> Observation: "The oldest person in the document is John Doe, a 55 year old lumberjack living in Newfoundland." Thought: I will now generate an image showcasing the oldest person. Code: ```py image = image_generator("A portrait of John Doe, a 55-year-old man living in Canada.") final_answer(image) ```<end_action> --- Task: "What is the result of the following operation: 5 + 3 + 1294.678?" Thought: I will use python code to compute the result of the operation and then return the final answer using the `final_answer` tool Code: ```py result = 5 + 3 + 1294.678 final_answer(result) ```<end_action> --- Task: "Which city has the highest population: Guangzhou or Shanghai?" Thought: I need to get the populations for both cities and compare them: I will use the tool `search` to get the population of both cities. Code: ```py population_guangzhou = search("Guangzhou population") print("Population Guangzhou:", population_guangzhou) population_shanghai = search("Shanghai population") print("Population Shanghai:", population_shanghai) ```<end_action> Observation: Population Guangzhou: ['Guangzhou has a population of 15 million inhabitants as of 2021.'] Population Shanghai: '26 million (2019)' Thought: Now I know that Shanghai has the highest population. Code: ```py final_answer("Shanghai") ```<end_action> --- Task: "What is the current age of the pope, raised to the power 0.36?" Thought: I will use the tool `wiki` to get the age of the pope, then raise it to the power 0.36. Code: ```py pope_age = wiki(query="current pope age") print("Pope age:", pope_age) ```<end_action> Observation: Pope age: "The pope Francis is currently 85 years old." Thought: I know that the pope is 85 years old. Let's compute the result using python code. Code: ```py pope_current_age = 85 ** 0.36 final_answer(pope_current_age) ```<end_action> Above example were using notional tools that might not exist for you. On top of performing computations in the Python code snippets that you create, you have access to these tools (and no other tool): <<tool_descriptions>> <<managed_agents_descriptions>> Here are the rules you should always follow to solve your task: 1. 
Always provide a 'Thought:' sequence, and a 'Code:\n```py' sequence ending with '```<end_action>' sequence, else you will fail. 2. Use only variables that you have defined! 3. Always use the right arguments for the tools. DO NOT pass the arguments as a dict as in 'answer = wiki({'query': "What is the place where James Bond lives?"})', but use the arguments directly as in 'answer = wiki(query="What is the place where James Bond lives?")'. 4. Take care to not chain too many sequential tool calls in the same code block, especially when the output format is unpredictable. For instance, a call to search has an unpredictable return format, so do not have another tool call that depends on its output in the same block: rather output results with print() to use them in the next block. 5. Call a tool only when needed, and never re-do a tool call that you previously did with the exact same parameters. 6. Don't name any new variable with the same name as a tool: for instance don't name a variable 'final_answer'. 7. Never create any notional variables in our code, as having these in your logs might derail you from the true variables. 8. You can use imports in your code, but only from the following list of modules: <<authorized_imports>> 9. The state persists between code executions: so if in one step you've created variables or imported modules, these will all persist. 10. Don't give up! You're in charge of solving the task, not providing directions to solve it. Now Begin! If you solve the task correctly, you will receive a reward of $1,000,000. """ SYSTEM_PROMPT_FACTS = """Below I will present you a task. You will now build a comprehensive preparatory survey of which facts we have at our disposal and which ones we still need. To do so, you will have to read the task and identify things that must be discovered in order to successfully complete it. Don't make any assumptions. For each item, provide a thorough reasoning. Here is how you will structure this survey: --- ### 1. Facts given in the task List here the specific facts given in the task that could help you (there might be nothing here). ### 2. Facts to look up List here any facts that we may need to look up. Also list where to find each of these, for instance a website, a file... - maybe the task contains some sources that you should re-use here. ### 3. Facts to derive List here anything that we want to derive from the above by logical reasoning, for instance computation or simulation. Keep in mind that "facts" will typically be specific names, dates, values, etc. Your answer should use the below headings: ### 1. Facts given in the task ### 2. Facts to look up ### 3. Facts to derive Do not add anything else.""" SYSTEM_PROMPT_PLAN = """You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools. Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there.""" USER_PROMPT_PLAN = """ Here is your task: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} {managed_agents_descriptions} List of facts that you know: ``` {answer_facts} ``` Now begin! 
Write your plan below.""" SYSTEM_PROMPT_FACTS_UPDATE = """ You are a world expert at gathering known and unknown facts based on a conversation. Below you will find a task, and ahistory of attempts made to solve the task. You will have to produce a list of these: ### 1. Facts given in the task ### 2. Facts that we have learned ### 3. Facts still to look up ### 4. Facts still to derive Find the task and history below.""" USER_PROMPT_FACTS_UPDATE = """Earlier we've built a list of facts. But since in your previous steps you may have learned useful new facts or invalidated some false ones. Please update your list of facts based on the previous history, and provide these headings: ### 1. Facts given in the task ### 2. Facts that we have learned ### 3. Facts still to look up ### 4. Facts still to derive Now write your new list of facts below.""" SYSTEM_PROMPT_PLAN_UPDATE = """You are a world expert at making efficient plans to solve any task using a set of carefully crafted tools. You have been given a task: ``` {task} ``` Find below the record of what has been tried so far to solve it. Then you will be asked to make an updated plan to solve the task. If the previous tries so far have met some success, you can make an updated plan based on these actions. If you are stalled, you can make a completely new plan starting from scratch. """ USER_PROMPT_PLAN_UPDATE = """You're still working towards solving this task: ``` {task} ``` You have access to these tools and only these: {tool_descriptions} {managed_agents_descriptions} Here is the up to date list of facts that you know: ``` {facts_update} ``` Now for the given task, develop a step-by-step high-level plan taking into account the above inputs and list of facts. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Beware that you have {remaining_steps} steps remaining. Do not skip steps, do not add any superfluous steps. Only write the high-level plan, DO NOT DETAIL INDIVIDUAL TOOL CALLS. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. Now write your new plan below.""" SYSTEM_PROMPT_PLAN_STRUCTURED = """Output a step-by-step plan to solve the task using the given tools. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Each step should be structured as follows: Step #n: { "description": <description of what the step does and its output> "tool": <tool to use>, "params": { <parameters to pass to the tool as a valid dict> } "output_var": <output variable name> } Each step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer. Below are some examples: Example 1: ------ Inputs: --- Task: How many encoder blocks were in the first attention-only ML architecture published? [FACTS LIST]: ### 1. Facts given in the task - The paper first introduced an attention-only ML architecture. - The specific information required is the page number where the number of encoder blocks is stated. - No local files are provided for access. ### 2. Facts to look up - The title and authors of the paper that first introduced an attention-only ML architecture. - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases) - The full text of the identified paper. 
- Source: Online academic repositories (e.g., arXiv, journal websites) - The specific page number in the paper where the number of encoder blocks is mentioned. - Source: The content of the identified paper ### 3. Facts to derive - By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated. - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found. ``` [STEP 1 TOOL CALL]: {'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)'} [OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need **Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, Illia Polosukhin [STEP 2 TOOL CALL]: {'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)'} [OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf --- Output plan: --- Step #1: { "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"", "tool": "inspect_file_as_text", "params": { "file_path": "https://arxiv.org/pdf/1706.03762.pdf", "question": "On which page is the number of encoder blocks mentioned?" }, "output_var": "page_number" } Step #2: { "description": "Provide the final answer", "tool": "final_answer", "params": { "answer": "{page_number}" }, "output_var": "" } ------ Example 2: ------ Inputs: --- Task: How many golf balls fits into a Boeing-747? [FACTS LIST]: ### 1. Facts given in the task - The task requires calculating the number of golf balls that fir into a Boeing-747 ### 2. Facts to look up - The volume of a golf ball - The volume of a Boeing-747 ### 3. Facts to derive - Once the volumes are known the final answer can be calculated --- Output plan: --- Step #1: { "description": "Find the volume of a Boeing-747", "tool": "web_search", "params": { "query": "What is the internal volume of a Boeing-747 in cubic meters?" }, "output_var": "boeing_volume" } Step #2: { "description": "Find the volume of a standard golf ball", "tool": "ask_search_agent", "params": { "query": "What is the volume of a standard golf ball in cubic centimeters?" }, "output_var": "golf_ball_volume" } Step #3: { "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.", "tool": "python_code", "params": { "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3" }, "output_var": "number_of_golf_balls" } Step #4: { "description": "Provide the final answer", "tool": "final_answer", "params": { "answer": "{number_of_golf_balls}" }, "output_var": "" } ------ Above example were using tools that might not exist for you. 
Your goal is to create a plan to solve the task.""" USER_PROMPT_PLAN_STRUCTURED = """ Here are your inputs: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} These tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code. List of facts that you know: ``` {answer_facts} ``` Now for the given task, create a plan taking into account the list of facts. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. Output the plan only and nothing else.""" SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED = """Output a step-by-step plan to solve the task using the given tools. This plan should involve individual tasks based on the available tools, that if executed correctly will yield the correct answer. Each step should be structured as follows: Step #n: {{ "description": <description of what the step does and its output> "tool": <tool to use>, "params": {{ <parameters to pass to the tool as a valid dict> }} "output_var": <output variable name> }} Each step must be necessary to reach the final answer. Steps should reuse outputs produced by earlier steps. The last step must be the final answer. Below are some examples: Example 1: ------ Inputs: --- Task: How many encoder blocks were in the first attention-only ML architecture published? [FACTS LIST]: ### 1. Facts given in the task - The paper first introduced an attention-only ML architecture. - The specific information required is the page number where the number of encoder blocks is stated. - No local files are provided for access. ### 2. Facts to look up - The title and authors of the paper that first introduced an attention-only ML architecture. - Source: Online search (e.g., Google Scholar, arXiv, or other academic databases) - The full text of the identified paper. - Source: Online academic repositories (e.g., arXiv, journal websites) - The specific page number in the paper where the number of encoder blocks is mentioned. - Source: The content of the identified paper ### 3. Facts to derive - By identifying the correct paper and locating the specific page, we will derive the page number where the number of encoder blocks is stated. - Logical steps: Identify the correct paper, access its content, search for the term "encoder blocks," and note the page number where this information is found. ``` [STEP 1 TOOL CALL]: {{'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Identify the title and authors of the paper that first introduced an attention-only ML architecture.\nanswer = ask_search_agent(query="Can you find the title and authors of the paper that first introduced an attention-only machine learning architecture? Please provide the full citation.")\nprint(answer)'}} [OUTPUT OF STEP 1] Observation: **Title**: Attention Is All You Need **Authors**: Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. 
Gomez, Lukasz Kaiser, Illia Polosukhin [STEP 2 TOOL CALL]: {{'tool_name': 'code interpreter', 'tool_arguments': '# Step 1: Find the full text of the identified paper on arXiv\\npaper_url = "https://arxiv.org/pdf/1706.03762.pdf"\\nprint(paper_url)'}} [OUTPUT OF STEP 2] Observation: https://arxiv.org/pdf/1706.03762.pdf --- Output plan: --- Step #1: {{ "description": "Open the PDF of the paper from the provided URL and search within the text of the paper for the mention of "encoder blocks"", "tool": "inspect_file_as_text", "params": {{ "file_path": "https://arxiv.org/pdf/1706.03762.pdf", "question": "On which page is the number of encoder blocks mentioned?" }}, "output_var": "page_number" }} Step #2: {{ "description": "Provide the final answer", "tool": "final_answer", "params": {{ "answer": "{{page_number}}" }}, "output_var": "" }} ------ Example 2: ------ Inputs: --- Task: How many golf balls fits into a Boeing-747? [FACTS LIST]: ### 1. Facts given in the task - The task requires calculating the number of golf balls that fir into a Boeing-747 ### 2. Facts to look up - The volume of a golf ball - The volume of a Boeing-747 ### 3. Facts to derive - Once the volumes are known the final answer can be calculated --- Output plan: --- Step #1: {{ "description": "Find the volume of a Boeing-747", "tool": "web_search", "params": {{ "query": "What is the internal volume of a Boeing-747 in cubic meters?" }}, "output_var": "boeing_volume" }} Step #2: {{ "description": "Find the volume of a standard golf ball", "tool": "ask_search_agent", "params": {{ "query": "What is the volume of a standard golf ball in cubic centimeters?" }}, "output_var": "golf_ball_volume" }} Step #3: {{ "description": "Convert the volume of a golf ball from cubic centimeters to cubic meters. Calculate the number of golf balls that fit into the Boeing-747 by dividing the internal volume of the Boeing-747 by the volume of a golf ball.", "tool": "python_code", "params": {{ "code": "golf_ball_volume_m3 = golf_ball_volume / 1e6\nnumber_of_golf_balls = boeing_volume / golf_ball_volume_m3" }}, "output_var": "number_of_golf_balls" }} Step #4: {{ "description": "Provide the final answer", "tool": "final_answer", "params": {{ "answer": "{{number_of_golf_balls}}" }}, "output_var": "" }} ------ Above example were using tools that might not exist for you. Find below the record of what has been tried so far to solve it. Your goal is to create an updated plan to solve the task.""" USER_PROMPT_PLAN_UPDATE_STRUCTURED = """ Here are your inputs: Task: ``` {task} ``` Your plan can leverage any of these tools: {tool_descriptions} These tools are Python functions which you can call with code. You also have access to a Python interpreter so you can run Python code. List of facts that you know: ``` {facts_update} ``` Now for the given task, create a plan taking into account the above inputs and list of facts. Beware that you have {remaining_steps} steps remaining. After writing the final step of the plan, write the '\n<end_plan>' tag and stop there. 
Output the plan only and nothing else.""" PLAN_UPDATE_FINAL_PLAN_REDACTION = """I still need to solve the task I was given: ``` {task} ``` Here is my new/updated plan of action to solve the task: ``` {plan_update} ```""" SUPPORTED_PLAN_TYPES = ["default", "structured"] PROMPTS_FOR_INITIAL_PLAN = { "default": {"system": SYSTEM_PROMPT_PLAN, "user": USER_PROMPT_PLAN}, "structured": {"system": SYSTEM_PROMPT_PLAN_STRUCTURED, "user": USER_PROMPT_PLAN_STRUCTURED}, } PROMPTS_FOR_PLAN_UPDATE = { "default": {"system": SYSTEM_PROMPT_PLAN_UPDATE, "user": USER_PROMPT_PLAN_UPDATE}, "structured": {"system": SYSTEM_PROMPT_PLAN_UPDATE_STRUCTURED, "user": USER_PROMPT_PLAN_UPDATE_STRUCTURED}, }
transformers/src/transformers/agents/prompts.py/0
{ "file_path": "transformers/src/transformers/agents/prompts.py", "repo_id": "transformers", "token_count": 9105 }
82
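These system prompts rely on placeholder tokens such as <<tool_descriptions>>, <<tool_names>>, <<authorized_imports>> and <<managed_agents_descriptions>> that the agent classes substitute at runtime. The sketch below shows one plausible way such substitution could be done; the render helper and the example tool description are assumptions, not the agents' actual implementation.

def render_system_prompt(template: str, tool_descriptions: str, tool_names: str, authorized_imports: str) -> str:
    # Naive string substitution of the placeholder tokens used in the templates above.
    return (
        template.replace("<<tool_descriptions>>", tool_descriptions)
        .replace("<<tool_names>>", tool_names)
        .replace("<<authorized_imports>>", authorized_imports)
        .replace("<<managed_agents_descriptions>>", "")
    )

# Hypothetical usage with the template defined above:
# system_prompt = render_system_prompt(
#     DEFAULT_REACT_CODE_SYSTEM_PROMPT,
#     tool_descriptions="- final_answer(answer): returns the final answer to the task",
#     tool_names="final_answer",
#     authorized_imports="math, re",
# )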
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand logger = logging.get_logger(__name__) # pylint: disable=invalid-name def try_infer_format_from_ext(path: str): if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(ext): return ext raise Exception( f"Unable to determine file format from file extension {path}. " f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" ) def run_command_factory(args): nlp = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format reader = PipelineDataFormat.from_str( format=format, output_path=args.output, input_path=args.input, column=args.column if args.column else nlp.default_input_names, overwrite=args.overwrite, ) return RunCommand(nlp, reader) class RunCommand(BaseTransformersCLICommand): def __init__(self, nlp: Pipeline, reader: PipelineDataFormat): self._nlp = nlp self._reader = reader @staticmethod def register_subcommand(parser: ArgumentParser): run_parser = parser.add_parser("run", help="Run a pipeline through the CLI") run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run") run_parser.add_argument("--input", type=str, help="Path to the file to use for inference") run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.") run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.") run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.") run_parser.add_argument( "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)" ) run_parser.add_argument( "--column", type=str, help="Name of the column to use as input. 
(For multi-column inputs such as QA, use column1,column2)", ) run_parser.add_argument( "--format", type=str, default="infer", choices=PipelineDataFormat.SUPPORTED_FORMATS, help="Input format to read from", ) run_parser.add_argument( "--device", type=int, default=-1, help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)", ) run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.") run_parser.set_defaults(func=run_command_factory) def run(self): nlp, outputs = self._nlp, [] for entry in self._reader: output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry) if isinstance(output, dict): outputs.append(output) else: outputs += output # Saving data if self._nlp.binary_output: binary_path = self._reader.save_binary(outputs) logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}") else: self._reader.save(outputs)
transformers/src/transformers/commands/run.py/0
{ "file_path": "transformers/src/transformers/commands/run.py", "repo_id": "transformers", "token_count": 1665 }
83
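The run subcommand above is essentially a thin wrapper that builds a pipeline() and streams records from a PipelineDataFormat reader into it. A rough programmatic equivalent of an invocation such as transformers-cli run --task text-classification --input data.txt is sketched below; the task name, the default model it pulls, and the one-example-per-line input file are assumptions.

from transformers import pipeline

nlp = pipeline(task="text-classification")  # task and default model are arbitrary examples
results = []
with open("data.txt", encoding="utf-8") as f:  # hypothetical input file, one example per line
    for line in f:
        line = line.strip()
        if not line:
            continue
        output = nlp(line)
        # Mirror RunCommand.run(): pipelines may return a dict or a list of dicts.
        if isinstance(output, dict):
            results.append(output)
        else:
            results += output
print(results[:3])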
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features logger = logging.get_logger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class SquadDataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ model_type: str = field( default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)} ) data_dir: str = field( default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) max_seq_length: int = field( default=128, metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) }, ) doc_stride: int = field( default=128, metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."}, ) max_query_length: int = field( default=64, metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) }, ) max_answer_length: int = field( default=30, metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) version_2_with_negative: bool = field( default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) null_score_diff_threshold: float = field( default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) n_best_size: int = field( default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) lang_id: int = field( default=0, metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) }, ) threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"}) class Split(Enum): train = "train" dev = "dev" class SquadDataset(Dataset): """ This will be superseded by a framework-agnostic approach soon. 
""" args: SquadDataTrainingArguments features: List[SquadFeatures] mode: Split is_language_sensitive: bool def __init__( self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ): self.args = args self.is_language_sensitive = is_language_sensitive self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if isinstance(mode, str): try: mode = Split[mode] except KeyError: raise KeyError("mode is not a valid split name") self.mode = mode # Load data features from cache or dataset file version_tag = "v2" if args.version_2_with_negative else "v1" cached_features_file = os.path.join( cache_dir if cache_dir is not None else args.data_dir, f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}", ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. lock_path = cached_features_file + ".lock" with FileLock(lock_path): if os.path.exists(cached_features_file) and not args.overwrite_cache: start = time.time() self.old_features = torch.load(cached_features_file) # Legacy cache files have only features, while new cache files # will have dataset and examples also. self.features = self.old_features["features"] self.dataset = self.old_features.get("dataset", None) self.examples = self.old_features.get("examples", None) logger.info( f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in" " future run" ) else: if mode == Split.dev: self.examples = self.processor.get_dev_examples(args.data_dir) else: self.examples = self.processor.get_train_examples(args.data_dir) self.features, self.dataset = squad_convert_examples_to_features( examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, ) start = time.time() torch.save( {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" ) def __len__(self): return len(self.features) def __getitem__(self, i) -> Dict[str, torch.Tensor]: # Convert to Tensors and build dataset feature = self.features[i] input_ids = torch.tensor(feature.input_ids, dtype=torch.long) attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long) token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long) cls_index = torch.tensor(feature.cls_index, dtype=torch.long) p_mask = torch.tensor(feature.p_mask, dtype=torch.float) is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float) inputs = { "input_ids": input_ids, "attention_mask": attention_mask, "token_type_ids": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": cls_index, "p_mask": p_mask}) if self.args.version_2_with_negative: inputs.update({"is_impossible": is_impossible}) if self.is_language_sensitive: inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)}) if self.mode == Split.train: start_positions = torch.tensor(feature.start_position, dtype=torch.long) end_positions = torch.tensor(feature.end_position, dtype=torch.long) inputs.update({"start_positions": start_positions, "end_positions": end_positions}) return inputs
transformers/src/transformers/data/datasets/squad.py/0
{ "file_path": "transformers/src/transformers/data/datasets/squad.py", "repo_id": "transformers", "token_count": 4006 }
84
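SquadDataset.__getitem__ returns a plain dict of fixed-length tensors, so batches can be formed with PyTorch's default collate function. The sketch below demonstrates that contract with a stand-in dataset; DummyQADataset and the sequence length of 128 are assumptions used only to keep the example self-contained, since the real class needs SQuAD files on disk.

import torch
from torch.utils.data import DataLoader, Dataset

class DummyQADataset(Dataset):
    """Stand-in that mimics the dict-of-tensors items produced by SquadDataset.__getitem__."""

    def __len__(self):
        return 4

    def __getitem__(self, i):
        return {
            "input_ids": torch.zeros(128, dtype=torch.long),
            "attention_mask": torch.ones(128, dtype=torch.long),
            "token_type_ids": torch.zeros(128, dtype=torch.long),
            "start_positions": torch.tensor(0),
            "end_positions": torch.tensor(1),
        }

loader = DataLoader(DummyQADataset(), batch_size=2)
batch = next(iter(loader))
print({k: v.shape for k, v in batch.items()})  # every field gains a leading batch dimension of 2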
from abc import ABC, abstractmethod from typing import List, Optional class Constraint(ABC): r"""Abstract base class for all constraints that can be applied during generation. It must define how the constraint can be satisfied. All classes that inherit Constraint must follow the requirement that ```py completed = False while not completed: _, completed = constraint.update(constraint.advance()) ``` will always terminate (halt). """ def __init__(self): # test for the above condition self.test() def test(self): """ Tests whether this constraint has been properly defined. """ counter = 0 completed = False while not completed: if counter == 1: self.reset() advance = self.advance() if not self.does_advance(advance): raise Exception( "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true." ) stepped, completed, reset = self.update(advance) counter += 1 if counter > 10000: raise Exception("update() does not fulfill the constraint.") if self.remaining() != 0: raise Exception("Custom Constraint is not defined correctly.") @abstractmethod def advance(self): """ When called, returns the token(s) that would take this constraint one step closer to being fulfilled. Return: token_ids (Union[int, List[int], None]): - A single token ID (int) that advances the constraint, or - A list of token IDs that could advance the constraint - None if the constraint is completed or cannot be advanced """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def does_advance(self, token_id: int): """ Reads in a token and returns whether it creates progress. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def update(self, token_id: int): """ Reads in a token and returns booleans that indicate the progress made by it. This function will update the state of this object unlikes `does_advance(self, token_id: int)`. This isn't to test whether a certain token will advance the progress; it's to update its state as if it has been generated. This becomes important if token_id != desired token (refer to else statement in PhrasalConstraint) Args: token_id(`int`): The id of a newly generated token in the beam search. Return: stepped(`bool`): Whether this constraint has become one step closer to being fulfuilled. completed(`bool`): Whether this constraint has been completely fulfilled by this token being generated. reset (`bool`): Whether this constraint has reset its progress by this token being generated. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def reset(self): """ Resets the state of this constraint to its initialization. We would call this in cases where the fulfillment of a constraint is abrupted by an unwanted token. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def remaining(self): """ Returns the number of remaining steps of `advance()` in order to complete this constraint. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def copy(self, stateful=False): """ Creates a new instance of this constraint. Args: stateful(`bool`): Whether to not only copy the constraint for new instance, but also its state. 
Return: constraint(`Constraint`): The same constraint as the one being called from. """ raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class PhrasalConstraint(Constraint): r""" [`Constraint`] enforcing that an ordered sequence of tokens is included in the output. Args: token_ids (`List[int]`): The id of the token that must be generated by the output. """ def __init__(self, token_ids: List[int]): super(Constraint, self).__init__() if not isinstance(token_ids, list) or len(token_ids) == 0: raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.") if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids): raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.") self.token_ids = token_ids self.seqlen = len(self.token_ids) self.fulfilled_idx = -1 # the index of the currently fulfilled step self.completed = False def advance(self): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def does_advance(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def update(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.fulfilled_idx += 1 stepped = True if self.fulfilled_idx == (self.seqlen - 1): completed = True self.completed = completed else: # failed to make progress. reset = True self.reset() return stepped, completed, reset def reset(self): self.completed = False self.fulfilled_idx = 0 def remaining(self): return self.seqlen - (self.fulfilled_idx + 1) def copy(self, stateful=False): new_constraint = PhrasalConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.fulfilled_idx = self.fulfilled_idx new_constraint.completed = self.completed return new_constraint class DisjunctiveTrie: def __init__(self, nested_token_ids: List[List[int]], no_subsets=True): r""" A helper class that builds a trie with the words represented in `nested_token_ids`. """ self.max_height = max([len(one) for one in nested_token_ids]) root = {} for token_ids in nested_token_ids: level = root for tidx, token_id in enumerate(token_ids): if token_id not in level: level[token_id] = {} level = level[token_id] if no_subsets and self.has_subsets(root, nested_token_ids): raise ValueError( "Each list in `nested_token_ids` can't be a complete subset of another list, but is" f" {nested_token_ids}." ) self.trie = root def next_tokens(self, current_seq): """ The next possible tokens that will progress the trie, given the current sequence of tokens in `current_seq`. """ start = self.trie for current_token in current_seq: start = start[current_token] next_tokens = list(start.keys()) return next_tokens def reached_leaf(self, current_seq): next_tokens = self.next_tokens(current_seq) return len(next_tokens) == 0 def count_leaves(self, root): next_nodes = list(root.values()) if len(next_nodes) == 0: return 1 else: return sum([self.count_leaves(nn) for nn in next_nodes]) def has_subsets(self, trie, nested_token_ids): """ Returns whether # of leaves == # of words. Otherwise some word is a subset of another. 
""" leaf_count = self.count_leaves(trie) return len(nested_token_ids) != leaf_count class DisjunctiveConstraint(Constraint): r""" A special [`Constraint`] that is fulfilled by fulfilling just one of several constraints. Args: nested_token_ids (`List[List[int]]`): A list of words, where each word is a list of ids. This constraint is fulfilled by generating just one from the list of words. """ def __init__(self, nested_token_ids: List[List[int]]): super(Constraint, self).__init__() if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.") if any(not isinstance(token_ids, list) for token_ids in nested_token_ids): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.") if any( any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids) for token_ids in nested_token_ids ): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." ) self.trie = DisjunctiveTrie(nested_token_ids) self.token_ids = nested_token_ids self.seqlen = self.trie.max_height self.current_seq = [] self.completed = False def advance(self): token_list = self.trie.next_tokens(self.current_seq) if len(token_list) == 0: return None else: return token_list def does_advance(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") next_tokens = self.trie.next_tokens(self.current_seq) return token_id in next_tokens def update(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False reset = False if self.does_advance(token_id): self.current_seq.append(token_id) stepped = True else: reset = True self.reset() completed = self.trie.reached_leaf(self.current_seq) self.completed = completed return stepped, completed, reset def reset(self): self.completed = False self.current_seq = [] def remaining(self): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq) def copy(self, stateful=False): new_constraint = DisjunctiveConstraint(self.token_ids) if stateful: new_constraint.seq_len = self.seqlen new_constraint.current_seq = self.current_seq new_constraint.completed = self.completed return new_constraint class ConstraintListState: r""" A class for beam scorers to track its progress through a list of constraints. Args: constraints (`List[Constraint]`): A list of [`Constraint`] objects that must be fulfilled by the beam scorer. """ def __init__(self, constraints: List[Constraint]): self.constraints = constraints # max # of steps required to fulfill a given constraint self.max_seqlen = max([c.seqlen for c in constraints]) self.n_constraints = len(constraints) self.completed = False self.init_state() def init_state(self): self.complete_constraints = [] self.inprogress_constraint = None self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints] def get_bank(self): add = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints) * self.max_seqlen) + add def advance(self): """The list of tokens to generate such that we can make progress. 
By "list" we don't mean the list of token that will fully fulfill a constraint. Given constraints `c_i = {t_ij | j == # of tokens}`, If we're not in the middle of progressing through a specific constraint `c_i`, we return: `[t_k1 for k in indices of unfulfilled constraints]` If we are in the middle of a constraint, then we return: `[t_ij]`, where `i` is the index of the inprogress constraint, `j` is the next step for the constraint. Though we don't care which constraint is fulfilled first, if we are in the progress of fulfilling a constraint, that's the only one we'll return. """ token_list = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" advance = constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) else: advance = self.inprogress_constraint.advance() if isinstance(advance, int): token_list.append(advance) elif isinstance(advance, list): token_list.extend(advance) if len(token_list) == 0: return None else: return token_list def reset(self, token_ids: Optional[List[int]]): """ token_ids: the tokens generated thus far to reset the state of the progress through constraints. """ self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint complete, stepped = self.add(token) # the entire list of constraints are fulfilled if self.completed: break def add(self, token_id: int): if not isinstance(token_id, int): raise TypeError(f"`token_id` should be an `int`, but is `{token_id}`.") complete, stepped = False, False if self.completed: complete = True stepped = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state stepped, complete, reset = self.inprogress_constraint.update(token_id) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False)) self.inprogress_constraint = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint) self.inprogress_constraint = None if len(self.pending_constraints) == 0: # we're done! self.completed = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints): if pending_constraint.does_advance(token_id): stepped, complete, reset = pending_constraint.update(token_id) if not stepped: raise Exception( "`constraint.update(token_id)` is not yielding incremental progress, " "even though `constraint.does_advance(token_id)` is true." ) if complete: self.complete_constraints.append(pending_constraint) self.inprogress_constraint = None if not complete and stepped: self.inprogress_constraint = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". 
self.pending_constraints = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. self.completed = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def copy(self, stateful=True): new_state = ConstraintListState(self.constraints) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: new_state.complete_constraints = [ constraint.copy(stateful=True) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True) new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints] return new_state
transformers/src/transformers/generation/beam_constraints.py/0
{ "file_path": "transformers/src/transformers/generation/beam_constraints.py", "repo_id": "transformers", "token_count": 8383 }
85
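The constraint classes in the file above are easiest to understand through their update loop. The sketch below exercises PhrasalConstraint, DisjunctiveConstraint and ConstraintListState directly; the token ids are arbitrary example values, and the import path simply mirrors this module's file path.

from transformers.generation.beam_constraints import (
    ConstraintListState,
    DisjunctiveConstraint,
    PhrasalConstraint,
)

# A phrasal constraint is fulfilled by generating its token ids in order.
phrase = PhrasalConstraint([5, 7, 9])
print(phrase.advance())                       # 5: the next token that makes progress
stepped, completed, reset = phrase.update(5)  # (True, False, False)
phrase.update(7)
_, completed, _ = phrase.update(9)
print(completed)                              # True: the whole phrase was generated

# A disjunctive constraint is fulfilled by completing any one of several sequences.
either = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
either.update(1)
print(either.advance())                       # [2, 4]: both branches are still open

# ConstraintListState is what a beam scorer tracks per beam.
state = ConstraintListState([PhrasalConstraint([5, 7]), PhrasalConstraint([8, 9])])
state.add(5)
print(state.advance())                        # [7]: finish the in-progress constraint first
print(state.get_bank())                       # 1: partial progress counts toward the bank

Note that the concrete subclasses deliberately skip Constraint.__init__, so constructing them does not trigger the self-test loop shown in the abstract base class.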
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Dict, Iterable, Optional, Union import numpy as np from .image_processing_base import BatchFeature, ImageProcessingMixin from .image_transforms import center_crop, normalize, rescale from .image_utils import ChannelDimension from .utils import logging logger = logging.get_logger(__name__) INIT_SERVICE_KWARGS = [ "processor_class", "image_processor_type", ] class BaseImageProcessor(ImageProcessingMixin): def __init__(self, **kwargs): super().__init__(**kwargs) def __call__(self, images, **kwargs) -> BatchFeature: """Preprocess an image or a batch of images.""" return self.preprocess(images, **kwargs) def preprocess(self, images, **kwargs) -> BatchFeature: raise NotImplementedError("Each image processor must implement its own preprocess method") def rescale( self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`float`): The scaling factor to rescale pixel values by. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The rescaled image. """ return rescale(image, scale=scale, data_format=data_format, input_data_format=input_data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. mean (`float` or `Iterable[float]`): Image mean to use for normalization. std (`float` or `Iterable[float]`): Image standard deviation to use for normalization. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. 
- `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. Returns: `np.ndarray`: The normalized image. """ return normalize( image, mean=mean, std=std, data_format=data_format, input_data_format=input_data_format, **kwargs ) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, input_data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format for the output image. If unset, the channel dimension format of the input image is used. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. input_data_format (`ChannelDimension` or `str`, *optional*): The channel dimension format for the input image. If unset, the channel dimension format is inferred from the input image. Can be one of: - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") return center_crop( image, size=(size["height"], size["width"]), data_format=data_format, input_data_format=input_data_format, **kwargs, ) def to_dict(self): encoder_dict = super().to_dict() encoder_dict.pop("_valid_processor_keys", None) return encoder_dict VALID_SIZE_DICT_KEYS = ( {"height", "width"}, {"shortest_edge"}, {"shortest_edge", "longest_edge"}, {"longest_edge"}, {"max_height", "max_width"}, ) def is_valid_size_dict(size_dict): if not isinstance(size_dict, dict): return False size_dict_keys = set(size_dict.keys()) for allowed_keys in VALID_SIZE_DICT_KEYS: if size_dict_keys == allowed_keys: return True return False def convert_to_size_dict( size, max_size: Optional[int] = None, default_to_square: bool = True, height_width_order: bool = True ): # By default, if size is an int we assume it represents a tuple of (size, size). if isinstance(size, int) and default_to_square: if max_size is not None: raise ValueError("Cannot specify both size as an int, with default_to_square=True and max_size") return {"height": size, "width": size} # In other configs, if size is an int and default_to_square is False, size represents the length of # the shortest edge after resizing. 
elif isinstance(size, int) and not default_to_square: size_dict = {"shortest_edge": size} if max_size is not None: size_dict["longest_edge"] = max_size return size_dict # Otherwise, if size is a tuple it's either (height, width) or (width, height) elif isinstance(size, (tuple, list)) and height_width_order: return {"height": size[0], "width": size[1]} elif isinstance(size, (tuple, list)) and not height_width_order: return {"height": size[1], "width": size[0]} elif size is None and max_size is not None: if default_to_square: raise ValueError("Cannot specify both default_to_square=True and max_size") return {"longest_edge": max_size} raise ValueError(f"Could not convert size input to size dict: {size}") def get_size_dict( size: Union[int, Iterable[int], Dict[str, int]] = None, max_size: Optional[int] = None, height_width_order: bool = True, default_to_square: bool = True, param_name="size", ) -> dict: """ Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height, width) or (width, height) format. - If `size` is tuple, it is converted to `{"height": size[0], "width": size[1]}` or `{"height": size[1], "width": size[0]}` if `height_width_order` is `False`. - If `size` is an int, and `default_to_square` is `True`, it is converted to `{"height": size, "width": size}`. - If `size` is an int and `default_to_square` is False, it is converted to `{"shortest_edge": size}`. If `max_size` is set, it is added to the dict as `{"longest_edge": max_size}`. Args: size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*): The `size` parameter to be cast into a size dictionary. max_size (`Optional[int]`, *optional*): The `max_size` parameter to be cast into a size dictionary. height_width_order (`bool`, *optional*, defaults to `True`): If `size` is a tuple, whether it's in (height, width) or (width, height) order. default_to_square (`bool`, *optional*, defaults to `True`): If `size` is an int, whether to default to a square image or not. """ if not isinstance(size, dict): size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order) logger.info( f"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}." f" Converted to {size_dict}.", ) else: size_dict = size if not is_valid_size_dict(size_dict): raise ValueError( f"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}" ) return size_dict def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple: """ Selects the best resolution from a list of possible resolutions based on the original size. This is done by calculating the effective and wasted resolution for each possible resolution. The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution. Args: original_size (tuple): The original size of the image in the format (height, width). possible_resolutions (list): A list of possible resolutions in the format [(height1, width1), (height2, width2), ...]. Returns: tuple: The best fit resolution in the format (height, width). 
""" original_height, original_width = original_size best_fit = None max_effective_resolution = 0 min_wasted_resolution = float("inf") for height, width in possible_resolutions: scale = min(width / original_width, height / original_height) downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale) effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height) wasted_resolution = (width * height) - effective_resolution if effective_resolution > max_effective_resolution or ( effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution ): max_effective_resolution = effective_resolution min_wasted_resolution = wasted_resolution best_fit = (height, width) return best_fit
transformers/src/transformers/image_processing_utils.py/0
{ "file_path": "transformers/src/transformers/image_processing_utils.py", "repo_id": "transformers", "token_count": 5018 }
86
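Most of the logic in get_size_dict above is about normalizing the many historical forms of the size argument. The following is a small sketch of the conversions it performs, using made-up sizes; the import path mirrors this module's file path.

from transformers.image_processing_utils import get_size_dict, select_best_resolution

# An int with the default `default_to_square=True` becomes a square size dict.
print(get_size_dict(224))                                          # {'height': 224, 'width': 224}

# With `default_to_square=False` the int is the shortest edge; `max_size` caps the longest edge.
print(get_size_dict(224, max_size=448, default_to_square=False))   # {'shortest_edge': 224, 'longest_edge': 448}

# A tuple is interpreted as (height, width) unless `height_width_order=False`.
print(get_size_dict((480, 640)))                                    # {'height': 480, 'width': 640}

# select_best_resolution keeps the candidate with the most effective pixels and the
# least wasted padding area for an original (height, width) image.
print(select_best_resolution((480, 640), [(336, 336), (672, 672), (336, 1344)]))  # (672, 672)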
# coding=utf-8 # Copyright 2024 The ggml.ai team and The HuggingFace Inc. team. and pygguf author (github.com/99991) # https://github.com/99991/pygguf # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Integration with GGML / The file is copied and adapted from https://github.com/99991/pygguf with extra methods beings exposed """ from array import array import numpy as np from tokenizers import Tokenizer, decoders, normalizers, pre_tokenizers, processors from tokenizers.models import BPE, Unigram from .. import AddedToken from ..convert_slow_tokenizer import GemmaConverter, GPT2Converter, LlamaConverter, Qwen2Converter, T5Converter from ..utils import logging from ..utils.logging import tqdm logger = logging.get_logger(__name__) GGUF_CONFIG_MAPPING = { "general": { "architecture": "model_type", "name": "_model_name_or_path", }, "llama": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", # NOTE: rope.dimension_count==head_dim only suitable for llama/mistral "rope.dimension_count": "head_dim", "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, "mistral": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", # NOTE: rope.dimension_count==head_dim only suitable for llama/mistral "rope.dimension_count": "head_dim", "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, "qwen2": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, "qwen2moe": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", "expert_count": "num_experts", "expert_used_count": "num_experts_per_tok", }, "falcon": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", 
"attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, "tokenizer": { "ggml.bos_token_id": "bos_token_id", "ggml.eos_token_id": "eos_token_id", "ggml.unknown_token_id": "unk_token_id", "ggml.padding_token_id": "pad_token_id", }, "phi3": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, "bloom": { "block_count": "n_layer", "embedding_length": "hidden_size", "attention.head_count": "n_head", "vocab_size": "vocab_size", "attention.layer_norm_epsilon": "layer_norm_epsilon", }, "t5": { "context_length": "n_positions", "block_count": "num_layers", "feed_forward_length": "d_ff", "embedding_length": "d_model", "attention.key_length": "d_kv", "attention.head_count": "num_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_epsilon": "layer_norm_epsilon", "attention.relative_buckets_count": "relative_attention_num_buckets", "decoder_start_token_id": "decoder_start_token_id", "vocab_size": "vocab_size", }, "stablelm": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_epsilon": "layer_norm_eps", "vocab_size": "vocab_size", }, "gpt2": { "block_count": "n_layer", "context_length": "n_ctx", "embedding_length": "n_embd", "feed_forward_length": "feed_forward_length", "attention.head_count": "n_head", "attention.layer_norm_epsilon": "layer_norm_epsilon", }, "starcoder2": { "block_count": "num_hidden_layers", "context_length": "max_position_embeddings", "embedding_length": "hidden_size", "feed_forward_length": "intermediate_size", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_epsilon": "norm_epsilon", }, "mamba": { "vocab_size": "vocab_size", "context_length": "max_position_embeddings", "embedding_length": "hidden_size", "attention.layer_norm_rms_epsilon": "layer_norm_epsilon", "block_count": "num_hidden_layers", "ssm.conv_kernel": "conv_kernel", "ssm.state_size": "state_size", "ssm.time_step_rank": "time_step_rank", "ssm.inner_size": "intermediate_size", }, "nemotron": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", "attention.head_count": "num_attention_heads", "attention.head_count_kv": "num_key_value_heads", "attention.layer_norm_rms_epsilon": "norm_eps", "vocab_size": "vocab_size", }, "gemma2": { "context_length": "max_position_embeddings", "block_count": "num_hidden_layers", "feed_forward_length": "intermediate_size", "embedding_length": "hidden_size", "rope.dimension_count": None, "rope.freq_base": "rope_theta", # NOTE: Gemma2 has key_length==value_length==head_dim # See: https://github.com/ggerganov/llama.cpp/blob/2e2f8f093cd4fb6bbb87ba84f6b9684fa082f3fa/convert_hf_to_gguf.py#L3293-L3294 "attention.key_length": "head_dim", "attention.head_count": "num_attention_heads", "attention.head_count_kv": 
"num_key_value_heads", "attention.layer_norm_rms_epsilon": "rms_norm_eps", "vocab_size": "vocab_size", }, } GGUF_TOKENIZER_MAPPING = { "tokenizer": { "ggml.model": "tokenizer_type", "ggml.tokens": "tokens", "ggml.scores": "scores", "ggml.token_type": "token_type", "ggml.merges": "merges", "ggml.bos_token_id": "bos_token_id", "ggml.eos_token_id": "eos_token_id", "ggml.unknown_token_id": "unk_token_id", "ggml.padding_token_id": "pad_token_id", "ggml.add_space_prefix": "add_prefix_space", }, "tokenizer_config": { "chat_template": "chat_template", "ggml.model": "model_type", "ggml.bos_token_id": "bos_token_id", "ggml.eos_token_id": "eos_token_id", "ggml.unknown_token_id": "unk_token_id", "ggml.padding_token_id": "pad_token_id", }, } def _gguf_parse_value(_value, data_type): if not isinstance(data_type, list): data_type = [data_type] if len(data_type) == 1: data_type = data_type[0] array_data_type = None else: if data_type[0] != 9: raise ValueError("Received multiple types, therefore expected the first type to indicate an array.") data_type, array_data_type = data_type if data_type in [0, 1, 2, 3, 4, 5, 10, 11]: _value = int(_value[0]) elif data_type in [6, 12]: _value = float(_value[0]) elif data_type in [7]: _value = bool(_value[0]) elif data_type in [8]: _value = array("B", list(_value)).tobytes().decode() elif data_type in [9]: _value = _gguf_parse_value(_value, array_data_type) return _value class GGUFTokenizerSkeleton: def __init__(self, dict_): for k, v in dict_.items(): setattr(self, k, v) if not hasattr(self, "merges"): if not hasattr(self, "tokens") or not hasattr(self, "scores"): raise ValueError( "tokens and scores need to be passed for a LLaMa tokenizer without merges to be instantiated." ) tokens = self.tokens scores = self.scores vocab = {t: scores[i] for i, t in enumerate(tokens)} logger.warning("Merges were not in checkpoint, building merges on the fly.") merges = [] for merge, piece_score in tqdm(vocab.items()): local = [] for index in range(1, len(merge)): piece_l, piece_r = merge[:index], merge[index:] if piece_l in tokens and piece_r in tokens: local.append((piece_l, piece_r, piece_score)) local = sorted(local, key=lambda x: (vocab[x[0]], vocab[x[1]]), reverse=True) merges.extend(local) merges = sorted(merges, key=lambda val: val[2], reverse=True) merges = [(val[0], val[1]) for val in merges] self.merges = merges else: self.merges = [tuple(merge.split(" ")) for merge in self.merges] if not hasattr(self, "scores"): self.scores = [None for _ in range(len(self.tokens))] if not hasattr(self, "added_tokens"): self.added_tokens = [] if not hasattr(self, "unk_token_id"): self.unk_token_id = None # Llama2 uses the field `unknown_token_id` if hasattr(self, "unknown_token_id") and self.unk_token_id is None: self.unk_token_id = self.unknown_token_id class GGUFLlamaConverter(LlamaConverter): def __init__(self, tokenizer_dict): self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.original_tokenizer = self.proto self.additional_kwargs = {} self.is_llama_3_tokenizer = getattr(self.proto, "tokenizer_type", "llama") != "llama" def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def merges(self, proto): return proto.merges def tokenizer(self, proto): vocab_scores = self.vocab(self.proto) merges = self.merges(self.proto) bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} unk_token = proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None bos_token = proto.tokens[proto.bos_token_id] if getattr(proto, "bos_token_id", None) is not None 
else None eos_token = proto.tokens[proto.bos_token_id] if getattr(proto, "eos_token_id", None) is not None else None tokenizer = Tokenizer( BPE( bpe_vocab, merges, unk_token=unk_token, fuse_unk=True, byte_fallback=True, ) ) special_tokens = [] if not hasattr(self.proto, "token_type"): if unk_token is not None: special_tokens.append(AddedToken(unk_token, normalized=False, special=True)) if bos_token is not None: special_tokens.append(AddedToken(bos_token, normalized=False, special=True)) if eos_token is not None: special_tokens.append(AddedToken(eos_token, normalized=False, special=True)) else: # 3 stands for special tokens special_tokens_idx = np.where(np.array(self.proto.token_type) == 3)[0] for idx in special_tokens_idx: special_tokens.append(AddedToken(self.proto.tokens[idx], normalized=False, special=True)) if len(special_tokens) != 0: tokenizer.add_special_tokens(special_tokens) if len(self.proto.added_tokens) != 0: tokenizer.add_tokens( [AddedToken(added_token, normalized=False, special=False) for added_token in self.proto.added_tokens] ) self.additional_kwargs["unk_token"] = unk_token self.additional_kwargs["eos_token"] = bos_token self.additional_kwargs["bos_token"] = eos_token if self.is_llama_3_tokenizer: self.additional_kwargs["add_prefix_space"] = None self.additional_kwargs["clean_up_tokenization_spaces"] = True self.additional_kwargs["legacy"] = False self.original_tokenizer.legacy = False return tokenizer def decoder(self, replacement, add_prefix_space): sequence = [ decoders.ByteFallback(), decoders.Fuse(), decoders.Replace("▁", " "), ] if self.is_llama_3_tokenizer: sequence += [decoders.ByteLevel(add_prefix_space=False, trim_offsets=False, use_regex=True)] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def converted(self): # Copied partly from converted method in SpmConverter class tokenizer = self.tokenizer(self.proto) # Tokenizer assemble normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor # HACK: patch the llama-3 tokenizer to use the correspinding pre-tokenizer # and normalizer if self.is_llama_3_tokenizer: tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel( add_prefix_space=False, trim_offsets=False, use_regex=True ) # This is tricky as the additional kwargs are passed after legacy is force-set in LlamaTokenizer's # init. 
tokenizer.normalizer = normalizers.Sequence([]) return tokenizer class GGUFQwen2Converter(Qwen2Converter): def __init__(self, tokenizer_dict): self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict) self.additional_kwargs = {} def converted(self) -> Tokenizer: vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)} merges = self.original_tokenizer.merges tokenizer = super().converted(vocab, merges) tokenizer.add_special_tokens( [ AddedToken("<|endoftext|>", normalized=False, special=True), AddedToken("<|im_start|>", normalized=False, special=True), AddedToken("<|im_end|>", normalized=False, special=True), ] ) return tokenizer class GGUFPhi3Converter(LlamaConverter): def __init__(self, tokenizer_dict): self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.original_tokenizer = self.proto self.additional_kwargs = {} def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def merges(self, proto): return proto.merges def tokenizer(self, proto): vocab_scores = self.vocab(self.proto) merges = self.merges(self.proto) bpe_vocab = {word: i for i, (word, _score) in enumerate(vocab_scores)} tokenizer = Tokenizer(BPE(bpe_vocab, merges)) # add the special tokens from phi3 tokenizer config tokenizer.add_special_tokens( [ AddedToken("</s>", rstrip=True, lstrip=False, normalized=False, special=True), AddedToken("<|endoftext|>", normalized=False, special=True), AddedToken("<|assistant|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder1|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder2|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder3|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder4|>", rstrip=True, normalized=False, special=True), AddedToken("<|system|>", rstrip=True, normalized=False, special=True), AddedToken("<|end|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder5|>", rstrip=True, normalized=False, special=True), AddedToken("<|placeholder6|>", rstrip=True, normalized=False, special=True), AddedToken("<|user|>", rstrip=True, normalized=False, special=True), ] ) self.additional_kwargs["unk_token"] = ( proto.tokens[proto.unk_token_id] if proto.unk_token_id is not None else None ) self.additional_kwargs["eos_token"] = ( proto.tokens[proto.eos_token_id] if proto.eos_token_id is not None else None ) self.additional_kwargs["bos_token"] = ( proto.tokens[proto.bos_token_id] if proto.bos_token_id is not None else None ) self.additional_kwargs["pad_token"] = ( proto.tokens[proto.pad_token_id] if proto.pad_token_id is not None else None ) return tokenizer def decoder(self, replacement, add_prefix_space): sequence = [ decoders.ByteFallback(), decoders.Fuse(), decoders.Replace(replacement, " "), ] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space tokenizer.decoder = self.decoder(replacement, add_prefix_space) return tokenizer class GGUFGPTConverter(GPT2Converter): def __init__(self, tokenizer_dict): self.original_tokenizer = GGUFTokenizerSkeleton(tokenizer_dict) self.additional_kwargs = {} def converted(self) -> Tokenizer: vocab = {word: i for i, word in enumerate(self.original_tokenizer.tokens)} merges = self.original_tokenizer.merges tokenizer = 
super().converted(vocab, merges) return tokenizer class GGUFT5Converter(T5Converter): def __init__(self, tokenizer_dict): # set dummy data to avoid unnecessary merges calculation tokenizer_dict["merges"] = ["dummy text"] self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.token2id = {k: v for v, k in enumerate(self.proto.tokens)} self.original_tokenizer = self.proto self.additional_kwargs = {} def vocab(self, proto): return list(zip(proto.tokens, proto.scores)) def normalizer(self, proto): if getattr(self.original_tokenizer, "legacy", True): sequence = [] if getattr(self.original_tokenizer, "add_prefix_space", True): sequence += [normalizers.Prepend(prepend="▁")] sequence += [normalizers.Replace(pattern=" ", content="▁")] return normalizers.Sequence(sequence) return None # non-legacy, no normalizer def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.token2id["</s>"]), ], ) def converted(self) -> Tokenizer: vocab_scores = self.vocab(self.proto) tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.proto.unk_token_id, byte_fallback=False, ) ) # Tokenizer assemble normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer tokenizer.decoder = self.decoder(replacement, add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer class GGUFGemmaConverter(GemmaConverter): def __init__(self, tokenizer_dict): # set dummy data to avoid unnecessary merges calculation tokenizer_dict["merges"] = ["dummy text"] self.proto = GGUFTokenizerSkeleton(tokenizer_dict) self.original_tokenizer = self.proto self.additional_kwargs = {} def vocab(self, proto): original_vocab = list(zip(proto.tokens, proto.scores)) updated_vocab = [] for token, score in original_vocab: if token == "<0x09>": updated_vocab.append(("\t", score)) elif " " in token and len(token.strip()) == 0: underscores = "▁" * len(token) updated_vocab.append((underscores, score)) else: updated_vocab.append((token, score)) return updated_vocab def normalizer(self, proto): return normalizers.Replace(" ", "▁") def decoder(self, replacement, add_prefix_space): sequence = [ decoders.Replace("▁", " "), decoders.ByteFallback(), decoders.Fuse(), ] if add_prefix_space: sequence += [decoders.Strip(content=" ", left=1)] return decoders.Sequence(sequence) def converted(self) -> Tokenizer: vocab_scores = self.vocab(self.proto) tokenizer = Tokenizer( Unigram( vocab_scores, unk_id=self.proto.unk_token_id, byte_fallback=self.handle_byte_fallback, ) ) normalizer = self.normalizer(self.proto) if normalizer is not None: tokenizer.normalizer = normalizer replacement = "▁" add_prefix_space = True if hasattr(self.original_tokenizer, "add_prefix_space"): add_prefix_space = self.original_tokenizer.add_prefix_space tokenizer.decoder = self.decoder(replacement, add_prefix_space) pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) if pre_tokenizer is not None: tokenizer.pre_tokenizer = pre_tokenizer return tokenizer GGUF_TO_FAST_CONVERTERS = { "llama": GGUFLlamaConverter, "qwen2": GGUFQwen2Converter, "qwen2_moe": GGUFQwen2Converter, "phi3": 
GGUFPhi3Converter, "bloom": GGUFGPTConverter, "falcon": GGUFGPTConverter, "stablelm": GGUFGPTConverter, "gpt2": GGUFGPTConverter, "starcoder2": GGUFGPTConverter, "t5": GGUFT5Converter, "mamba": GGUFGPTConverter, "nemotron": GGUFGPTConverter, "gemma2": GGUFGemmaConverter, } def convert_gguf_tokenizer(architecture, tokenizer_dict) -> Tokenizer: """ Utilities to convert a slow tokenizer instance in a fast tokenizer instance. Args: architecture (`str`): The model architecture derived from gguf file. transformer_tokenizer ([`~tokenization_utils_base.PreTrainedTokenizer`]): Instance of a slow tokenizer to convert in the backend tokenizer for [`~tokenization_utils_base.PreTrainedTokenizerFast`]. Return: A instance of [`~tokenizers.Tokenizer`] to be used as the backend tokenizer of a [`~tokenization_utils_base.PreTrainedTokenizerFast`] """ tokenizer_class_name = architecture converter = GGUF_TO_FAST_CONVERTERS[tokenizer_class_name](tokenizer_dict) fast_tokenizer = converter.converted() return fast_tokenizer, converter.additional_kwargs
transformers/src/transformers/integrations/ggml.py/0
{ "file_path": "transformers/src/transformers/integrations/ggml.py", "repo_id": "transformers", "token_count": 12259 }
87
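The mapping tables at the top of this module only describe how GGUF metadata keys translate to transformers config attributes; the actual renaming happens in the GGUF loader elsewhere in the library. The loop below is an illustrative sketch of that translation with made-up metadata values, not the loader's real code path.

from transformers.integrations.ggml import GGUF_CONFIG_MAPPING

# Example metadata as it might be parsed out of a llama-architecture GGUF file.
gguf_metadata = {
    "llama.context_length": 4096,
    "llama.attention.head_count": 32,
    "llama.attention.layer_norm_rms_epsilon": 1e-5,
}

config_kwargs = {}
arch_mapping = GGUF_CONFIG_MAPPING["llama"]
for key, value in gguf_metadata.items():
    gguf_key = key.split(".", 1)[1]      # strip the architecture prefix
    hf_key = arch_mapping.get(gguf_key)
    if hf_key is not None:               # entries mapped to None are intentionally dropped
        config_kwargs[hf_key] = value

print(config_kwargs)
# {'max_position_embeddings': 4096, 'num_attention_heads': 32, 'rms_norm_eps': 1e-05}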
#include <torch/extension.h> #include <ATen/ATen.h> #include "cuda_launch.h" #include "cuda_kernel.h" #include <vector> ////////////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////////////// std::vector<at::Tensor> index_max_kernel( at::Tensor index_vals, // [batch_size, 32, num_block] at::Tensor indices, // [batch_size, num_block], int A_num_block, int B_num_block ) { int batch_size = indices.size(0); int num_block = indices.size(1); at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options()); at::Tensor max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options()); dim3 threads(256); dim3 blocks(batch_size); int shared_mem = A_num_block * 32 * sizeof(float); index_max_cuda_kernel<<<blocks, threads, shared_mem>>>( index_vals.data_ptr<float>(), indices.data_ptr<int>(), max_vals.data_ptr<float>(), max_vals_scatter.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return {max_vals, max_vals_scatter}; } at::Tensor mm_to_sparse_kernel( at::Tensor dense_A, // [batch_size, A_num_block, dim, 32] at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] at::Tensor indices // [batch_size, num_block] ) { int batch_size = dense_A.size(0); int A_num_block = dense_A.size(1); int B_num_block = dense_B.size(1); int dim = dense_A.size(2); int num_block = indices.size(1); at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); dim3 threads(64, 4); dim3 blocks(num_block / 4, batch_size); mm_to_sparse_cuda_kernel<<<blocks, threads>>>( dense_A.data_ptr<float>(), dense_B.data_ptr<float>(), indices.data_ptr<int>(), sparse_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, dim, num_block ); return sparse_C; } at::Tensor sparse_dense_mm_kernel( at::Tensor sparse_A, // [batch_size, num_block, 32, 32] at::Tensor indices, // [batch_size, num_block] at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] int A_num_block ) { int batch_size = sparse_A.size(0); int num_block = sparse_A.size(1); int B_num_block = dense_B.size(1); int dim = dense_B.size(2); at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options()); dim3 threads(128, 2); dim3 blocks(num_block / 2, batch_size); sparse_dense_mm_cuda_kernel<<<blocks, threads>>>( sparse_A.data_ptr<float>(), indices.data_ptr<int>(), dense_B.data_ptr<float>(), dense_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, dim, num_block ); return dense_C; } at::Tensor reduce_sum_kernel( at::Tensor sparse_A, // [batch_size, num_block, 32, 32] at::Tensor indices, // [batch_size, num_block] int A_num_block, int B_num_block ) { int batch_size = sparse_A.size(0); int num_block = sparse_A.size(1); at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options()); dim3 threads(32, 4); dim3 blocks(num_block / 4, batch_size); reduce_sum_cuda_kernel<<<blocks, threads>>>( sparse_A.data_ptr<float>(), indices.data_ptr<int>(), dense_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return dense_C; } at::Tensor scatter_kernel( at::Tensor dense_A, // [batch_size, A_num_block, 32] at::Tensor indices, // [batch_size, num_block] int B_num_block ) { int batch_size = dense_A.size(0); int A_num_block = dense_A.size(1); int num_block = indices.size(1); at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); dim3 threads(32, 4); dim3 blocks(num_block / 4, batch_size); 
scatter_cuda_kernel<<<blocks, threads>>>( dense_A.data_ptr<float>(), indices.data_ptr<int>(), sparse_C.data_ptr<float>(), batch_size, A_num_block, B_num_block, num_block ); return sparse_C; }
transformers/src/transformers/kernels/mra/cuda_launch.cu/0
{ "file_path": "transformers/src/transformers/kernels/mra/cuda_launch.cu", "repo_id": "transformers", "token_count": 1668 }
88
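These launchers are meant to be compiled as a PyTorch extension and called from Python with the tensor shapes given in the signature comments. The sketch below shows one way to build and call them with torch.utils.cpp_extension.load; the source file list and the exposed function names are assumptions, since the binding code is not part of this file and the real transformers build wires the kernels up through its own loader.

import torch
from torch.utils.cpp_extension import load

# Assumed source list for illustration only.
mra_kernels = load(
    name="mra_cuda_kernel",
    sources=["torch_extension.cpp", "cuda_launch.cu", "cuda_kernel.cu"],
)

batch_size, A_num_block, B_num_block, dim, num_block = 2, 8, 8, 64, 16
dense_A = torch.randn(batch_size, A_num_block, dim, 32, device="cuda")
dense_B = torch.randn(batch_size, B_num_block, dim, 32, device="cuda")
# One entry per selected 32x32 block; the kernel reads them as 32-bit ints.
indices = torch.randint(
    0, A_num_block * B_num_block, (batch_size, num_block), dtype=torch.int32, device="cuda"
)

# Shapes follow the comments in mm_to_sparse_kernel:
#   [B, A_blocks, dim, 32] x [B, B_blocks, dim, 32] -> [B, num_block, 32, 32]
sparse_C = mra_kernels.mm_to_sparse_kernel(dense_A, dense_B, indices)
print(sparse_C.shape)  # torch.Size([2, 16, 32, 32])

Note that num_block must stay divisible by the per-launch block factors used above (4 for mm_to_sparse and reduce/scatter, 2 for sparse_dense_mm), since the grid sizes are computed by integer division.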
# Copyright 2024 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List, Optional import torch import torch.nn as nn from torch import Tensor from ..utils import is_accelerate_available, is_scipy_available, is_vision_available, requires_backends if is_accelerate_available(): from accelerate import PartialState from accelerate.utils import reduce if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_vision_available(): from transformers.image_transforms import center_to_corners_format def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py class ImageLoss(nn.Module): """ This class computes the losses for DetrForObjectDetection/DetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). A note on the `num_classes` argument (copied from original repo in detr.py): "the naming of the `num_classes` parameter of the criterion is somewhat misleading. It indeed corresponds to `max_obj_id` + 1, where `max_obj_id` is the maximum id for a class in your dataset. For example, COCO has a `max_obj_id` of 90, so we pass `num_classes` to be 91. 
As another example, for a dataset that has a single class with `id` 1, you should pass `num_classes` to be 2 (`max_obj_id` + 1). For more details on this, check the following discussion https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223" Args: matcher (`DetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. eos_coef (`float`): Relative classification weight applied to the no-object category. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__(self, matcher, num_classes, eos_coef, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.eos_coef = eos_coef self.losses = losses empty_weight = torch.ones(self.num_classes + 1) empty_weight[-1] = self.eos_coef self.register_buffer("empty_weight", empty_weight) # removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (NLL) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o loss_ce = nn.functional.cross_entropy(source_logits.transpose(1, 2), target_classes, self.empty_weight) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. """ if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) world_size = 1 if is_accelerate_available(): if PartialState._shared_state != {}: num_boxes = reduce(num_boxes) world_size = PartialState().num_processes num_boxes = torch.clamp(num_boxes / world_size, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses # taken from https://github.com/facebookresearch/detr/blob/master/models/matcher.py class HungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. 
Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. Contrary to the loss, we don't use the NLL, # but approximate it in 1 - proba[target class]. # The 1 is a constant that doesn't change the matching, it can be omitted. class_cost = -out_prob[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # below: bounding box utilities taken from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by their (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # modified from torchvision to also return the union def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format.
Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # below: taken from https://github.com/facebookresearch/detr/blob/master/util/misc.py#L306 def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes class NestedTensor: def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask) # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused def _set_aux_loss(outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. 
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] def ForSegmentationLoss( logits, labels, device, pred_boxes, pred_masks, config, outputs_class=None, outputs_coord=None, **kwargs ): # First: create the matcher matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost) # Second: create the criterion losses = ["labels", "boxes", "cardinality", "masks"] criterion = ImageLoss( matcher=matcher, num_classes=config.num_labels, eos_coef=config.eos_coefficient, losses=losses, ) criterion.to(device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["pred_masks"] = pred_masks auxiliary_outputs = None if config.auxiliary_loss: auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient} weight_dict["loss_giou"] = config.giou_loss_coefficient weight_dict["loss_mask"] = config.mask_loss_coefficient weight_dict["loss_dice"] = config.dice_loss_coefficient if config.auxiliary_loss: aux_weight_dict = {} for i in range(config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) return loss, loss_dict, auxiliary_outputs def ForObjectDetectionLoss( logits, labels, device, pred_boxes, config, outputs_class=None, outputs_coord=None, **kwargs ): # First: create the matcher matcher = HungarianMatcher(class_cost=config.class_cost, bbox_cost=config.bbox_cost, giou_cost=config.giou_cost) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = ImageLoss( matcher=matcher, num_classes=config.num_labels, eos_coef=config.eos_coefficient, losses=losses, ) criterion.to(device) # Third: compute the losses, based on outputs and labels outputs_loss = {} auxiliary_outputs = None outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if config.auxiliary_loss: auxiliary_outputs = _set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient} weight_dict["loss_giou"] = config.giou_loss_coefficient if config.auxiliary_loss: aux_weight_dict = {} for i in range(config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) return loss, loss_dict, auxiliary_outputs
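# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream file).
# It mirrors, on toy tensors, how `HungarianMatcher.forward` above builds its
# cost matrix -- classification cost, L1 box cost and generalized-IoU cost --
# and solves the assignment with `scipy.optimize.linear_sum_assignment`. The
# small box helpers are re-implemented locally so the snippet runs on its own;
# every toy shape and value below is an assumption made for the example only.
import torch
from scipy.optimize import linear_sum_assignment


def _center_to_corners(boxes):
    # (center_x, center_y, width, height) -> (x0, y0, x1, y1), the same
    # convention as `center_to_corners_format` used by the matcher above.
    cx, cy, w, h = boxes.unbind(-1)
    return torch.stack([cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2], dim=-1)


def _pairwise_generalized_box_iou(boxes1, boxes2):
    # Pairwise GIoU for corner-format boxes, following `generalized_box_iou`.
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2])
    right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
    wh = (right_bottom - left_top).clamp(min=0)
    inter = wh[:, :, 0] * wh[:, :, 1]
    union = area1[:, None] + area2 - inter
    iou = inter / union
    hull_lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    hull_rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])
    hull_wh = (hull_rb - hull_lt).clamp(min=0)
    hull = hull_wh[:, :, 0] * hull_wh[:, :, 1]
    return iou - (hull - union) / hull


if __name__ == "__main__":
    torch.manual_seed(0)
    num_queries, num_classes, num_targets = 4, 3, 2

    # Toy predictions for one image: class logits and (cx, cy, w, h) boxes.
    logits = torch.randn(num_queries, num_classes)
    pred_boxes = torch.rand(num_queries, 4) * 0.5 + 0.25

    # Toy ground truth in the same center format.
    target_labels = torch.tensor([0, 2])
    target_boxes = torch.rand(num_targets, 4) * 0.5 + 0.25

    out_prob = logits.softmax(-1)
    class_cost = -out_prob[:, target_labels]                # [num_queries, num_targets]
    bbox_cost = torch.cdist(pred_boxes, target_boxes, p=1)  # [num_queries, num_targets]
    giou_cost = -_pairwise_generalized_box_iou(
        _center_to_corners(pred_boxes), _center_to_corners(target_boxes)
    )

    # Same weighted combination as the matcher above (all weights set to 1 here).
    cost_matrix = class_cost + bbox_cost + giou_cost
    row_ind, col_ind = linear_sum_assignment(cost_matrix.numpy())
    print("matched (prediction, target) pairs:", list(zip(row_ind.tolist(), col_ind.tolist())))
# ---------------------------------------------------------------------------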
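# ---------------------------------------------------------------------------
# Illustrative sketch (added for exposition; not part of the upstream file).
# It reproduces, with plain Python objects, the final "weighted sum of losses"
# step of `ForObjectDetectionLoss` above: per-loss weights are derived from
# config-style coefficients, duplicated once per auxiliary decoder layer, and
# only keys present in `weight_dict` contribute to the total. The coefficient
# values and the toy `loss_dict` keys below are assumptions for the example.
from types import SimpleNamespace

if __name__ == "__main__":
    config = SimpleNamespace(
        bbox_loss_coefficient=5.0,
        giou_loss_coefficient=2.0,
        auxiliary_loss=True,
        decoder_layers=3,
    )

    # Build the per-loss weights the same way as in the function above.
    weight_dict = {"loss_ce": 1, "loss_bbox": config.bbox_loss_coefficient}
    weight_dict["loss_giou"] = config.giou_loss_coefficient
    if config.auxiliary_loss:
        aux_weight_dict = {}
        for i in range(config.decoder_layers - 1):
            aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)

    # Toy stand-in for the criterion output (plain floats instead of scalar
    # tensors). Unweighted diagnostics such as the cardinality error stay in
    # `loss_dict` but are excluded from the total because they carry no weight.
    loss_dict = {
        "loss_ce": 0.7,
        "loss_bbox": 0.2,
        "loss_giou": 0.4,
        "cardinality_error": 1.0,
        "loss_ce_0": 0.9,
        "loss_bbox_0": 0.3,
        "loss_giou_0": 0.5,
    }
    total_loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
    print("total weighted loss:", total_loss)
# ---------------------------------------------------------------------------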
transformers/src/transformers/loss/loss_for_object_detection.py/0
{ "file_path": "transformers/src/transformers/loss/loss_for_object_detection.py", "repo_id": "transformers", "token_count": 9857 }
89
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import ( albert, align, altclip, aria, audio_spectrogram_transformer, auto, autoformer, bamba, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_2, bloom, bridgetower, bros, byt5, camembert, canine, chameleon, chinese_clip, clap, clip, clipseg, clvp, code_llama, codegen, cohere, cohere2, colpali, conditional_detr, convbert, convnext, convnextv2, cpm, cpmant, ctrl, cvt, dac, data2vec, dbrx, deberta, deberta_v2, decision_transformer, deformable_detr, deit, deprecated, depth_anything, detr, dialogpt, diffllama, dinat, dinov2, dinov2_with_registers, distilbert, dit, donut, dpr, dpt, efficientnet, electra, emu3, encodec, encoder_decoder, ernie, esm, falcon, falcon_mamba, fastspeech2_conformer, flaubert, flava, fnet, focalnet, fsmt, funnel, fuyu, gemma, gemma2, git, glm, glpn, gpt2, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_sw3, gptj, granite, granitemoe, grounding_dino, groupvit, helium, herbert, hiera, hubert, ibert, idefics, idefics2, idefics3, ijepa, imagegpt, informer, instructblip, instructblipvideo, jamba, jetmoe, kosmos2, layoutlm, layoutlmv2, layoutlmv3, layoutxlm, led, levit, lilt, llama, llava, llava_next, llava_next_video, llava_onevision, longformer, longt5, luke, lxmert, m2m_100, mamba, mamba2, marian, markuplm, mask2former, maskformer, mbart, mbart50, megatron_bert, megatron_gpt2, mgp_str, mimi, mistral, mixtral, mllama, mluke, mobilebert, mobilenet_v1, mobilenet_v2, mobilevit, mobilevitv2, modernbert, moonshine, moshi, mpnet, mpt, mra, mt5, musicgen, musicgen_melody, mvp, myt5, nemotron, nllb, nllb_moe, nougat, nystromformer, olmo, olmo2, olmoe, omdet_turbo, oneformer, openai, opt, owlv2, owlvit, paligemma, patchtsmixer, patchtst, pegasus, pegasus_x, perceiver, persimmon, phi, phi3, phimoe, phobert, pix2struct, pixtral, plbart, poolformer, pop2piano, prophetnet, pvt, pvt_v2, qwen2, qwen2_5_vl, qwen2_audio, qwen2_moe, qwen2_vl, rag, recurrent_gemma, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rt_detr, rwkv, sam, seamless_m4t, seamless_m4t_v2, segformer, seggpt, sew, sew_d, siglip, speech_encoder_decoder, speech_to_text, speecht5, splinter, squeezebert, stablelm, starcoder2, superglue, superpoint, swiftformer, swin, swin2sr, swinv2, switch_transformers, t5, table_transformer, tapas, textnet, time_series_transformer, timesformer, timm_backbone, timm_wrapper, trocr, tvp, udop, umt5, unispeech, unispeech_sat, univnet, upernet, video_llava, videomae, vilt, vipllava, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_mae, vit_msn, vitdet, vitmatte, vitpose, vitpose_backbone, vits, vivit, wav2vec2, wav2vec2_bert, wav2vec2_conformer, wav2vec2_phoneme, wav2vec2_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, zamba, zamba2, zoedepth, )
transformers/src/transformers/models/__init__.py/0
{ "file_path": "transformers/src/transformers/models/__init__.py", "repo_id": "transformers", "token_count": 2564 }
90
# coding=utf-8 # Copyright 2022 The BAAI Teams Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch AltCLIP model.""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.nn as nn import torch.utils.checkpoint from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, BaseModelOutputWithPoolingAndProjection, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, torch_int from .configuration_altclip import AltCLIPConfig, AltCLIPTextConfig, AltCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "BAAI/AltCLIP" _CONFIG_FOR_DOC = "AltCLIPConfig" ALTCLIP_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`CLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ ALTCLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ ALTCLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. interpolate_pos_encoding (`bool`, *optional*, defaults `False`): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ ALTCLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. interpolate_pos_encoding (`bool`, *optional*, defaults `False`): Whether to interpolate the pre-trained position encodings. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # contrastive loss function, adapted from # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass # Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->AltCLIP class AltCLIPOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`]. image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`]. text_model_output (`BaseModelOutputWithPooling`): The output of the [`AltCLIPTextModel`]. vision_model_output (`BaseModelOutputWithPooling`): The output of the [`AltCLIPVisionModel`]. """ loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPooling = None vision_model_output: BaseModelOutputWithPooling = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.roberta.modeling_roberta.RobertaEmbeddings with Roberta->AltRoberta class AltRobertaEmbeddings(nn.Module): """ Same as BertEmbeddings with a tiny tweak for positional embeddings indexing. 
""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
Args: inputs_embeds: torch.Tensor Returns: torch.Tensor """ input_shape = inputs_embeds.size()[:-1] sequence_length = input_shape[1] position_ids = torch.arange( self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device ) return position_ids.unsqueeze(0).expand(input_shape) # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfAttention with Roberta->AltRoberta class AltRobertaSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in AltRobertaModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput class AltRobertaSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states ALT_ROBERTA_SELF_ATTENTION_CLASSES = { "eager": AltRobertaSelfAttention, } # Copied from transformers.models.roberta.modeling_roberta.RobertaAttention with Roberta->AltRoberta,ROBERTA->ALT_ROBERTA class AltRobertaAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ALT_ROBERTA_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = AltRobertaSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.roberta.modeling_roberta.RobertaIntermediate with Roberta->AltRoberta class AltRobertaIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = 
nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_roberta.RobertaOutput class AltRobertaOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.roberta.modeling_roberta.RobertaLayer with Roberta->AltRoberta class AltRobertaLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = AltRobertaAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = AltRobertaAttention(config, position_embedding_type="absolute") self.intermediate = AltRobertaIntermediate(config) self.output = AltRobertaOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention 
weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.roberta.modeling_roberta.RobertaEncoder with Roberta->AltRoberta class AltRobertaEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([AltRobertaLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.roberta.modeling_roberta.RobertaPooler class AltRobertaPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.clip.modeling_clip.CLIPAttention with CLIP->AltCLIP class AltCLIPAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." 
) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) # apply the causal_attention_mask first if causal_attention_mask is not None: if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is" f" {causal_attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->AltCLIP class AltCLIPMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states class AltCLIPEncoderLayer(nn.Module): def __init__(self, config: AltCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = AltCLIPAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = AltCLIPMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, causal_attention_mask: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. `(config.encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, causal_attention_mask=causal_attention_mask, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class AltCLIPEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`AltCLIPEncoderLayer`]. 
Args: config: AltCLIPConfig """ def __init__(self, config: AltCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([AltCLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, attention_mask: Optional[torch.Tensor] = None, causal_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Causal mask for the text model. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( encoder_layer.__call__, hidden_states, attention_mask, causal_attention_mask, output_attentions, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, causal_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->AltCLIP class AltCLIPVisionEmbeddings(nn.Module): def __init__(self, config: AltCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False) def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor: """ This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution images. This method is also adapted to support torch.jit tracing. 
Adapted from: - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211 """ num_patches = embeddings.shape[1] - 1 position_embedding = self.position_embedding.weight.unsqueeze(0) num_positions = position_embedding.shape[1] - 1 # always interpolate when tracing to ensure the exported model works for dynamic input shapes if not torch.jit.is_tracing() and num_patches == num_positions and height == width: return self.position_embedding(self.position_ids) class_pos_embed = position_embedding[:, :1] patch_pos_embed = position_embedding[:, 1:] dim = embeddings.shape[-1] new_height = height // self.patch_size new_width = width // self.patch_size sqrt_num_positions = torch_int(num_positions**0.5) patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim) patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2) patch_pos_embed = nn.functional.interpolate( patch_pos_embed, size=(new_height, new_width), mode="bicubic", align_corners=False, ) patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim) return torch.cat((class_pos_embed, patch_pos_embed), dim=1) def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor: batch_size, _, height, width = pixel_values.shape if not interpolate_pos_encoding and (height != self.image_size or width != self.image_size): raise ValueError( f"Input image size ({height}*{width}) doesn't match model" f" ({self.image_size}*{self.image_size})." ) target_dtype = self.patch_embedding.weight.dtype patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) if interpolate_pos_encoding: embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width) else: embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings class AltCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = AltCLIPConfig base_model_prefix = "altclip" supports_gradient_checkpointing = True _no_split_module = [] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, AltCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, AltCLIPAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, AltCLIPMLP): factor = self.config.initializer_factor in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, AltCLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) module.text_projection._is_hf_initialized = True nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) module.visual_projection._is_hf_initialized = True elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_factor) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() class AltCLIPVisionTransformer(nn.Module): def __init__(self, config: AltCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = AltCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = AltCLIPEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, interpolate_pos_encoding: Optional[bool] = False, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values, 
interpolate_pos_encoding=interpolate_pos_encoding) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) class AltCLIPVisionModel(AltCLIPPreTrainedModel): config_class = AltCLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: AltCLIPVisionConfig): super().__init__(config) self.vision_model = AltCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=AltCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, AltCLIPVisionModel >>> model = AltCLIPVisionModel.from_pretrained("BAAI/AltCLIP") >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) class AltRobertaModel(AltCLIPPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in *Attention is all you need*_ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. .. 
_*Attention is all you need*: https://arxiv.org/abs/1706.03762 """ config_class = AltCLIPTextConfig # Copied from transformers.models.clap.modeling_clap.ClapTextModel.__init__ with ClapText->AltRoberta def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = AltRobertaEmbeddings(config) self.encoder = AltRobertaEncoder(config) self.pooler = AltRobertaPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) # Copied from transformers.models.clap.modeling_clap.ClapTextModel.forward def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: 
return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class AltCLIPTextModel(AltCLIPPreTrainedModel): config_class = AltCLIPTextConfig def __init__(self, config): super().__init__(config) self.roberta = AltRobertaModel(config, add_pooling_layer=False) self.transformation = nn.Linear(config.hidden_size, config.project_dim) self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.post_init() def get_input_embeddings(self) -> nn.Module: return self.roberta.embeddings.word_embeddings def set_input_embeddings(self, value: nn.Embedding) -> None: self.roberta.embeddings.word_embeddings = value def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding: return super().resize_token_embeddings(new_num_tokens) @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPoolingAndProjection, config_class=AltCLIPTextConfig) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, output_hidden_states: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPoolingAndProjection]: r""" Returns: Examples: ```python >>> from transformers import AutoProcessor, AltCLIPTextModel >>> model = AltCLIPTextModel.from_pretrained("BAAI/AltCLIP") >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") >>> texts = ["it's a cat", "it's a dog"] >>> inputs = processor(text=texts, padding=True, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.roberta( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # last module outputs sequence_output = outputs[0] # project every module sequence_output = self.pre_LN(sequence_output) # pooler projection_state = self.transformation(sequence_output) pooler_output = projection_state[:, 0] if not return_dict: return (projection_state, pooler_output) + outputs[2:4] return BaseModelOutputWithPoolingAndProjection( last_hidden_state=projection_state, pooler_output=pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class AltCLIPModel(AltCLIPPreTrainedModel): config_class = AltCLIPConfig def __init__(self, config: AltCLIPConfig): super().__init__(config) if not isinstance(config.vision_config, AltCLIPVisionConfig): raise TypeError( "config.vision_config is expected to be of type AltCLIPVisionConfig 
but is of type" f" {type(config.vision_config)}." ) if not isinstance(config.text_config, AltCLIPTextConfig): raise TypeError( "config.text_config is expected to be of type AltCLIPTextConfig but is of type" f" {type(config.text_config)}." ) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.project_dim self.vision_embed_dim = vision_config.hidden_size self.text_model = AltCLIPTextModel(text_config) self.vision_model = AltCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value)) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(ALTCLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, token_type_ids=None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPTextModel`]. Examples: ```python >>> from transformers import AutoProcessor, AltCLIPModel >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") >>> inputs = processor(text=["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) ```""" # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, token_type_ids=token_type_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[1] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(ALTCLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`AltCLIPVisionModel`]. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, AltCLIPModel >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) ```""" # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(ALTCLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=AltCLIPOutput, config_class=AltCLIPConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.Tensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, interpolate_pos_encoding: bool = False, return_dict: Optional[bool] = None, ) -> Union[Tuple, AltCLIPOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, AltCLIPModel >>> model = AltCLIPModel.from_pretrained("BAAI/AltCLIP") >>> processor = AutoProcessor.from_pretrained("BAAI/AltCLIP") >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor( ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True ... ) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use AltCLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, interpolate_pos_encoding=interpolate_pos_encoding, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[1] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.T loss = None if return_loss: loss = clip_loss(logits_per_text) if not return_dict: output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return AltCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, ) # Copied from transformers.models.roberta.modeling_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx, past_key_values_length=0): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx __all__ = ["AltCLIPPreTrainedModel", "AltCLIPVisionModel", "AltCLIPTextModel", "AltCLIPModel"]
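# ---------------------------------------------------------------------------
# Illustrative sketch appended in this dump, not part of the upstream file: it
# exercises `create_position_ids_from_input_ids` defined above, assuming a
# RoBERTa-style `padding_idx` of 1. Non-padding tokens receive positions
# counted from `padding_idx + 1`, while padding positions stay at `padding_idx`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    demo_input_ids = torch.tensor([[5, 6, 7, 1, 1]])  # token id 1 acts as the padding id in this sketch
    demo_position_ids = create_position_ids_from_input_ids(demo_input_ids, padding_idx=1)
    # cumulative count over the non-padding mask gives [1, 2, 3, 0, 0]; adding
    # padding_idx yields positions [2, 3, 4, 1, 1]
    print(demo_position_ids)  # tensor([[2, 3, 4, 1, 1]])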
transformers/src/transformers/models/altclip/modeling_altclip.py/0
{ "file_path": "transformers/src/transformers/models/altclip/modeling_altclip.py", "repo_id": "transformers", "token_count": 34379 }
91
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Auto Config class.""" import importlib import os import re import warnings from collections import OrderedDict from typing import List, Union from ...configuration_utils import PretrainedConfig from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code from ...utils import CONFIG_NAME, logging logger = logging.get_logger(__name__) CONFIG_MAPPING_NAMES = OrderedDict( [ # Add configs here ("albert", "AlbertConfig"), ("align", "AlignConfig"), ("altclip", "AltCLIPConfig"), ("aria", "AriaConfig"), ("aria_text", "AriaTextConfig"), ("audio-spectrogram-transformer", "ASTConfig"), ("autoformer", "AutoformerConfig"), ("bamba", "BambaConfig"), ("bark", "BarkConfig"), ("bart", "BartConfig"), ("beit", "BeitConfig"), ("bert", "BertConfig"), ("bert-generation", "BertGenerationConfig"), ("big_bird", "BigBirdConfig"), ("bigbird_pegasus", "BigBirdPegasusConfig"), ("biogpt", "BioGptConfig"), ("bit", "BitConfig"), ("blenderbot", "BlenderbotConfig"), ("blenderbot-small", "BlenderbotSmallConfig"), ("blip", "BlipConfig"), ("blip-2", "Blip2Config"), ("bloom", "BloomConfig"), ("bridgetower", "BridgeTowerConfig"), ("bros", "BrosConfig"), ("camembert", "CamembertConfig"), ("canine", "CanineConfig"), ("chameleon", "ChameleonConfig"), ("chinese_clip", "ChineseCLIPConfig"), ("chinese_clip_vision_model", "ChineseCLIPVisionConfig"), ("clap", "ClapConfig"), ("clip", "CLIPConfig"), ("clip_text_model", "CLIPTextConfig"), ("clip_vision_model", "CLIPVisionConfig"), ("clipseg", "CLIPSegConfig"), ("clvp", "ClvpConfig"), ("code_llama", "LlamaConfig"), ("codegen", "CodeGenConfig"), ("cohere", "CohereConfig"), ("cohere2", "Cohere2Config"), ("colpali", "ColPaliConfig"), ("conditional_detr", "ConditionalDetrConfig"), ("convbert", "ConvBertConfig"), ("convnext", "ConvNextConfig"), ("convnextv2", "ConvNextV2Config"), ("cpmant", "CpmAntConfig"), ("ctrl", "CTRLConfig"), ("cvt", "CvtConfig"), ("dac", "DacConfig"), ("data2vec-audio", "Data2VecAudioConfig"), ("data2vec-text", "Data2VecTextConfig"), ("data2vec-vision", "Data2VecVisionConfig"), ("dbrx", "DbrxConfig"), ("deberta", "DebertaConfig"), ("deberta-v2", "DebertaV2Config"), ("decision_transformer", "DecisionTransformerConfig"), ("deformable_detr", "DeformableDetrConfig"), ("deit", "DeiTConfig"), ("depth_anything", "DepthAnythingConfig"), ("deta", "DetaConfig"), ("detr", "DetrConfig"), ("diffllama", "DiffLlamaConfig"), ("dinat", "DinatConfig"), ("dinov2", "Dinov2Config"), ("dinov2_with_registers", "Dinov2WithRegistersConfig"), ("distilbert", "DistilBertConfig"), ("donut-swin", "DonutSwinConfig"), ("dpr", "DPRConfig"), ("dpt", "DPTConfig"), ("efficientformer", "EfficientFormerConfig"), ("efficientnet", "EfficientNetConfig"), ("electra", "ElectraConfig"), ("emu3", "Emu3Config"), ("encodec", "EncodecConfig"), ("encoder-decoder", "EncoderDecoderConfig"), ("ernie", "ErnieConfig"), ("ernie_m", "ErnieMConfig"), ("esm", "EsmConfig"), ("falcon", "FalconConfig"), 
("falcon_mamba", "FalconMambaConfig"), ("fastspeech2_conformer", "FastSpeech2ConformerConfig"), ("flaubert", "FlaubertConfig"), ("flava", "FlavaConfig"), ("fnet", "FNetConfig"), ("focalnet", "FocalNetConfig"), ("fsmt", "FSMTConfig"), ("funnel", "FunnelConfig"), ("fuyu", "FuyuConfig"), ("gemma", "GemmaConfig"), ("gemma2", "Gemma2Config"), ("git", "GitConfig"), ("glm", "GlmConfig"), ("glpn", "GLPNConfig"), ("gpt-sw3", "GPT2Config"), ("gpt2", "GPT2Config"), ("gpt_bigcode", "GPTBigCodeConfig"), ("gpt_neo", "GPTNeoConfig"), ("gpt_neox", "GPTNeoXConfig"), ("gpt_neox_japanese", "GPTNeoXJapaneseConfig"), ("gptj", "GPTJConfig"), ("gptsan-japanese", "GPTSanJapaneseConfig"), ("granite", "GraniteConfig"), ("granitemoe", "GraniteMoeConfig"), ("granitevision", "LlavaNextConfig"), ("graphormer", "GraphormerConfig"), ("grounding-dino", "GroundingDinoConfig"), ("groupvit", "GroupViTConfig"), ("helium", "HeliumConfig"), ("hiera", "HieraConfig"), ("hubert", "HubertConfig"), ("ibert", "IBertConfig"), ("idefics", "IdeficsConfig"), ("idefics2", "Idefics2Config"), ("idefics3", "Idefics3Config"), ("idefics3_vision", "Idefics3VisionConfig"), ("ijepa", "IJepaConfig"), ("imagegpt", "ImageGPTConfig"), ("informer", "InformerConfig"), ("instructblip", "InstructBlipConfig"), ("instructblipvideo", "InstructBlipVideoConfig"), ("jamba", "JambaConfig"), ("jetmoe", "JetMoeConfig"), ("jukebox", "JukeboxConfig"), ("kosmos-2", "Kosmos2Config"), ("layoutlm", "LayoutLMConfig"), ("layoutlmv2", "LayoutLMv2Config"), ("layoutlmv3", "LayoutLMv3Config"), ("led", "LEDConfig"), ("levit", "LevitConfig"), ("lilt", "LiltConfig"), ("llama", "LlamaConfig"), ("llava", "LlavaConfig"), ("llava_next", "LlavaNextConfig"), ("llava_next_video", "LlavaNextVideoConfig"), ("llava_onevision", "LlavaOnevisionConfig"), ("longformer", "LongformerConfig"), ("longt5", "LongT5Config"), ("luke", "LukeConfig"), ("lxmert", "LxmertConfig"), ("m2m_100", "M2M100Config"), ("mamba", "MambaConfig"), ("mamba2", "Mamba2Config"), ("marian", "MarianConfig"), ("markuplm", "MarkupLMConfig"), ("mask2former", "Mask2FormerConfig"), ("maskformer", "MaskFormerConfig"), ("maskformer-swin", "MaskFormerSwinConfig"), ("mbart", "MBartConfig"), ("mctct", "MCTCTConfig"), ("mega", "MegaConfig"), ("megatron-bert", "MegatronBertConfig"), ("mgp-str", "MgpstrConfig"), ("mimi", "MimiConfig"), ("mistral", "MistralConfig"), ("mixtral", "MixtralConfig"), ("mllama", "MllamaConfig"), ("mobilebert", "MobileBertConfig"), ("mobilenet_v1", "MobileNetV1Config"), ("mobilenet_v2", "MobileNetV2Config"), ("mobilevit", "MobileViTConfig"), ("mobilevitv2", "MobileViTV2Config"), ("modernbert", "ModernBertConfig"), ("moonshine", "MoonshineConfig"), ("moshi", "MoshiConfig"), ("mpnet", "MPNetConfig"), ("mpt", "MptConfig"), ("mra", "MraConfig"), ("mt5", "MT5Config"), ("musicgen", "MusicgenConfig"), ("musicgen_melody", "MusicgenMelodyConfig"), ("mvp", "MvpConfig"), ("nat", "NatConfig"), ("nemotron", "NemotronConfig"), ("nezha", "NezhaConfig"), ("nllb-moe", "NllbMoeConfig"), ("nougat", "VisionEncoderDecoderConfig"), ("nystromformer", "NystromformerConfig"), ("olmo", "OlmoConfig"), ("olmo2", "Olmo2Config"), ("olmoe", "OlmoeConfig"), ("omdet-turbo", "OmDetTurboConfig"), ("oneformer", "OneFormerConfig"), ("open-llama", "OpenLlamaConfig"), ("openai-gpt", "OpenAIGPTConfig"), ("opt", "OPTConfig"), ("owlv2", "Owlv2Config"), ("owlvit", "OwlViTConfig"), ("paligemma", "PaliGemmaConfig"), ("patchtsmixer", "PatchTSMixerConfig"), ("patchtst", "PatchTSTConfig"), ("pegasus", "PegasusConfig"), ("pegasus_x", "PegasusXConfig"), 
("perceiver", "PerceiverConfig"), ("persimmon", "PersimmonConfig"), ("phi", "PhiConfig"), ("phi3", "Phi3Config"), ("phimoe", "PhimoeConfig"), ("pix2struct", "Pix2StructConfig"), ("pixtral", "PixtralVisionConfig"), ("plbart", "PLBartConfig"), ("poolformer", "PoolFormerConfig"), ("pop2piano", "Pop2PianoConfig"), ("prophetnet", "ProphetNetConfig"), ("pvt", "PvtConfig"), ("pvt_v2", "PvtV2Config"), ("qdqbert", "QDQBertConfig"), ("qwen2", "Qwen2Config"), ("qwen2_5_vl", "Qwen2_5_VLConfig"), ("qwen2_audio", "Qwen2AudioConfig"), ("qwen2_audio_encoder", "Qwen2AudioEncoderConfig"), ("qwen2_moe", "Qwen2MoeConfig"), ("qwen2_vl", "Qwen2VLConfig"), ("rag", "RagConfig"), ("realm", "RealmConfig"), ("recurrent_gemma", "RecurrentGemmaConfig"), ("reformer", "ReformerConfig"), ("regnet", "RegNetConfig"), ("rembert", "RemBertConfig"), ("resnet", "ResNetConfig"), ("retribert", "RetriBertConfig"), ("roberta", "RobertaConfig"), ("roberta-prelayernorm", "RobertaPreLayerNormConfig"), ("roc_bert", "RoCBertConfig"), ("roformer", "RoFormerConfig"), ("rt_detr", "RTDetrConfig"), ("rt_detr_resnet", "RTDetrResNetConfig"), ("rwkv", "RwkvConfig"), ("sam", "SamConfig"), ("seamless_m4t", "SeamlessM4TConfig"), ("seamless_m4t_v2", "SeamlessM4Tv2Config"), ("segformer", "SegformerConfig"), ("seggpt", "SegGptConfig"), ("sew", "SEWConfig"), ("sew-d", "SEWDConfig"), ("siglip", "SiglipConfig"), ("siglip_vision_model", "SiglipVisionConfig"), ("speech-encoder-decoder", "SpeechEncoderDecoderConfig"), ("speech_to_text", "Speech2TextConfig"), ("speech_to_text_2", "Speech2Text2Config"), ("speecht5", "SpeechT5Config"), ("splinter", "SplinterConfig"), ("squeezebert", "SqueezeBertConfig"), ("stablelm", "StableLmConfig"), ("starcoder2", "Starcoder2Config"), ("superglue", "SuperGlueConfig"), ("superpoint", "SuperPointConfig"), ("swiftformer", "SwiftFormerConfig"), ("swin", "SwinConfig"), ("swin2sr", "Swin2SRConfig"), ("swinv2", "Swinv2Config"), ("switch_transformers", "SwitchTransformersConfig"), ("t5", "T5Config"), ("table-transformer", "TableTransformerConfig"), ("tapas", "TapasConfig"), ("textnet", "TextNetConfig"), ("time_series_transformer", "TimeSeriesTransformerConfig"), ("timesformer", "TimesformerConfig"), ("timm_backbone", "TimmBackboneConfig"), ("timm_wrapper", "TimmWrapperConfig"), ("trajectory_transformer", "TrajectoryTransformerConfig"), ("transfo-xl", "TransfoXLConfig"), ("trocr", "TrOCRConfig"), ("tvlt", "TvltConfig"), ("tvp", "TvpConfig"), ("udop", "UdopConfig"), ("umt5", "UMT5Config"), ("unispeech", "UniSpeechConfig"), ("unispeech-sat", "UniSpeechSatConfig"), ("univnet", "UnivNetConfig"), ("upernet", "UperNetConfig"), ("van", "VanConfig"), ("video_llava", "VideoLlavaConfig"), ("videomae", "VideoMAEConfig"), ("vilt", "ViltConfig"), ("vipllava", "VipLlavaConfig"), ("vision-encoder-decoder", "VisionEncoderDecoderConfig"), ("vision-text-dual-encoder", "VisionTextDualEncoderConfig"), ("visual_bert", "VisualBertConfig"), ("vit", "ViTConfig"), ("vit_hybrid", "ViTHybridConfig"), ("vit_mae", "ViTMAEConfig"), ("vit_msn", "ViTMSNConfig"), ("vitdet", "VitDetConfig"), ("vitmatte", "VitMatteConfig"), ("vitpose", "VitPoseConfig"), ("vitpose_backbone", "VitPoseBackboneConfig"), ("vits", "VitsConfig"), ("vivit", "VivitConfig"), ("wav2vec2", "Wav2Vec2Config"), ("wav2vec2-bert", "Wav2Vec2BertConfig"), ("wav2vec2-conformer", "Wav2Vec2ConformerConfig"), ("wavlm", "WavLMConfig"), ("whisper", "WhisperConfig"), ("xclip", "XCLIPConfig"), ("xglm", "XGLMConfig"), ("xlm", "XLMConfig"), ("xlm-prophetnet", "XLMProphetNetConfig"), ("xlm-roberta", 
"XLMRobertaConfig"), ("xlm-roberta-xl", "XLMRobertaXLConfig"), ("xlnet", "XLNetConfig"), ("xmod", "XmodConfig"), ("yolos", "YolosConfig"), ("yoso", "YosoConfig"), ("zamba", "ZambaConfig"), ("zamba2", "Zamba2Config"), ("zoedepth", "ZoeDepthConfig"), ] ) MODEL_NAMES_MAPPING = OrderedDict( [ # Add full (and cased) model names here ("albert", "ALBERT"), ("align", "ALIGN"), ("altclip", "AltCLIP"), ("aria", "Aria"), ("aria_text", "AriaText"), ("audio-spectrogram-transformer", "Audio Spectrogram Transformer"), ("autoformer", "Autoformer"), ("bamba", "Bamba"), ("bark", "Bark"), ("bart", "BART"), ("barthez", "BARThez"), ("bartpho", "BARTpho"), ("beit", "BEiT"), ("bert", "BERT"), ("bert-generation", "Bert Generation"), ("bert-japanese", "BertJapanese"), ("bertweet", "BERTweet"), ("big_bird", "BigBird"), ("bigbird_pegasus", "BigBird-Pegasus"), ("biogpt", "BioGpt"), ("bit", "BiT"), ("blenderbot", "Blenderbot"), ("blenderbot-small", "BlenderbotSmall"), ("blip", "BLIP"), ("blip-2", "BLIP-2"), ("bloom", "BLOOM"), ("bort", "BORT"), ("bridgetower", "BridgeTower"), ("bros", "BROS"), ("byt5", "ByT5"), ("camembert", "CamemBERT"), ("canine", "CANINE"), ("chameleon", "Chameleon"), ("chinese_clip", "Chinese-CLIP"), ("chinese_clip_vision_model", "ChineseCLIPVisionModel"), ("clap", "CLAP"), ("clip", "CLIP"), ("clip_text_model", "CLIPTextModel"), ("clip_vision_model", "CLIPVisionModel"), ("clipseg", "CLIPSeg"), ("clvp", "CLVP"), ("code_llama", "CodeLlama"), ("codegen", "CodeGen"), ("cohere", "Cohere"), ("cohere2", "Cohere2"), ("colpali", "ColPali"), ("conditional_detr", "Conditional DETR"), ("convbert", "ConvBERT"), ("convnext", "ConvNeXT"), ("convnextv2", "ConvNeXTV2"), ("cpm", "CPM"), ("cpmant", "CPM-Ant"), ("ctrl", "CTRL"), ("cvt", "CvT"), ("dac", "DAC"), ("data2vec-audio", "Data2VecAudio"), ("data2vec-text", "Data2VecText"), ("data2vec-vision", "Data2VecVision"), ("dbrx", "DBRX"), ("deberta", "DeBERTa"), ("deberta-v2", "DeBERTa-v2"), ("decision_transformer", "Decision Transformer"), ("deformable_detr", "Deformable DETR"), ("deit", "DeiT"), ("deplot", "DePlot"), ("depth_anything", "Depth Anything"), ("depth_anything_v2", "Depth Anything V2"), ("deta", "DETA"), ("detr", "DETR"), ("dialogpt", "DialoGPT"), ("diffllama", "DiffLlama"), ("dinat", "DiNAT"), ("dinov2", "DINOv2"), ("dinov2_with_registers", "DINOv2 with Registers"), ("distilbert", "DistilBERT"), ("dit", "DiT"), ("donut-swin", "DonutSwin"), ("dpr", "DPR"), ("dpt", "DPT"), ("efficientformer", "EfficientFormer"), ("efficientnet", "EfficientNet"), ("electra", "ELECTRA"), ("emu3", "Emu3"), ("encodec", "EnCodec"), ("encoder-decoder", "Encoder decoder"), ("ernie", "ERNIE"), ("ernie_m", "ErnieM"), ("esm", "ESM"), ("falcon", "Falcon"), ("falcon3", "Falcon3"), ("falcon_mamba", "FalconMamba"), ("fastspeech2_conformer", "FastSpeech2Conformer"), ("flan-t5", "FLAN-T5"), ("flan-ul2", "FLAN-UL2"), ("flaubert", "FlauBERT"), ("flava", "FLAVA"), ("fnet", "FNet"), ("focalnet", "FocalNet"), ("fsmt", "FairSeq Machine-Translation"), ("funnel", "Funnel Transformer"), ("fuyu", "Fuyu"), ("gemma", "Gemma"), ("gemma2", "Gemma2"), ("git", "GIT"), ("glm", "GLM"), ("glpn", "GLPN"), ("gpt-sw3", "GPT-Sw3"), ("gpt2", "OpenAI GPT-2"), ("gpt_bigcode", "GPTBigCode"), ("gpt_neo", "GPT Neo"), ("gpt_neox", "GPT NeoX"), ("gpt_neox_japanese", "GPT NeoX Japanese"), ("gptj", "GPT-J"), ("gptsan-japanese", "GPTSAN-japanese"), ("granite", "Granite"), ("granitemoe", "GraniteMoeMoe"), ("granitevision", "LLaVA-NeXT"), ("graphormer", "Graphormer"), ("grounding-dino", "Grounding DINO"), ("groupvit", 
"GroupViT"), ("helium", "Helium"), ("herbert", "HerBERT"), ("hiera", "Hiera"), ("hubert", "Hubert"), ("ibert", "I-BERT"), ("idefics", "IDEFICS"), ("idefics2", "Idefics2"), ("idefics3", "Idefics3"), ("idefics3_vision", "Idefics3VisionTransformer"), ("ijepa", "I-JEPA"), ("imagegpt", "ImageGPT"), ("informer", "Informer"), ("instructblip", "InstructBLIP"), ("instructblipvideo", "InstructBlipVideo"), ("jamba", "Jamba"), ("jetmoe", "JetMoe"), ("jukebox", "Jukebox"), ("kosmos-2", "KOSMOS-2"), ("layoutlm", "LayoutLM"), ("layoutlmv2", "LayoutLMv2"), ("layoutlmv3", "LayoutLMv3"), ("layoutxlm", "LayoutXLM"), ("led", "LED"), ("levit", "LeViT"), ("lilt", "LiLT"), ("llama", "LLaMA"), ("llama2", "Llama2"), ("llama3", "Llama3"), ("llava", "LLaVa"), ("llava_next", "LLaVA-NeXT"), ("llava_next_video", "LLaVa-NeXT-Video"), ("llava_onevision", "LLaVA-Onevision"), ("longformer", "Longformer"), ("longt5", "LongT5"), ("luke", "LUKE"), ("lxmert", "LXMERT"), ("m2m_100", "M2M100"), ("madlad-400", "MADLAD-400"), ("mamba", "Mamba"), ("mamba2", "mamba2"), ("marian", "Marian"), ("markuplm", "MarkupLM"), ("mask2former", "Mask2Former"), ("maskformer", "MaskFormer"), ("maskformer-swin", "MaskFormerSwin"), ("matcha", "MatCha"), ("mbart", "mBART"), ("mbart50", "mBART-50"), ("mctct", "M-CTC-T"), ("mega", "MEGA"), ("megatron-bert", "Megatron-BERT"), ("megatron_gpt2", "Megatron-GPT2"), ("mgp-str", "MGP-STR"), ("mimi", "Mimi"), ("mistral", "Mistral"), ("mixtral", "Mixtral"), ("mllama", "Mllama"), ("mluke", "mLUKE"), ("mms", "MMS"), ("mobilebert", "MobileBERT"), ("mobilenet_v1", "MobileNetV1"), ("mobilenet_v2", "MobileNetV2"), ("mobilevit", "MobileViT"), ("mobilevitv2", "MobileViTV2"), ("modernbert", "ModernBERT"), ("moonshine", "Moonshine"), ("moshi", "Moshi"), ("mpnet", "MPNet"), ("mpt", "MPT"), ("mra", "MRA"), ("mt5", "MT5"), ("musicgen", "MusicGen"), ("musicgen_melody", "MusicGen Melody"), ("mvp", "MVP"), ("myt5", "myt5"), ("nat", "NAT"), ("nemotron", "Nemotron"), ("nezha", "Nezha"), ("nllb", "NLLB"), ("nllb-moe", "NLLB-MOE"), ("nougat", "Nougat"), ("nystromformer", "Nyströmformer"), ("olmo", "OLMo"), ("olmo2", "OLMo2"), ("olmoe", "OLMoE"), ("omdet-turbo", "OmDet-Turbo"), ("oneformer", "OneFormer"), ("open-llama", "OpenLlama"), ("openai-gpt", "OpenAI GPT"), ("opt", "OPT"), ("owlv2", "OWLv2"), ("owlvit", "OWL-ViT"), ("paligemma", "PaliGemma"), ("patchtsmixer", "PatchTSMixer"), ("patchtst", "PatchTST"), ("pegasus", "Pegasus"), ("pegasus_x", "PEGASUS-X"), ("perceiver", "Perceiver"), ("persimmon", "Persimmon"), ("phi", "Phi"), ("phi3", "Phi3"), ("phimoe", "Phimoe"), ("phobert", "PhoBERT"), ("pix2struct", "Pix2Struct"), ("pixtral", "Pixtral"), ("plbart", "PLBart"), ("poolformer", "PoolFormer"), ("pop2piano", "Pop2Piano"), ("prophetnet", "ProphetNet"), ("pvt", "PVT"), ("pvt_v2", "PVTv2"), ("qdqbert", "QDQBert"), ("qwen2", "Qwen2"), ("qwen2_5_vl", "Qwen2_5_VL"), ("qwen2_audio", "Qwen2Audio"), ("qwen2_audio_encoder", "Qwen2AudioEncoder"), ("qwen2_moe", "Qwen2MoE"), ("qwen2_vl", "Qwen2VL"), ("rag", "RAG"), ("realm", "REALM"), ("recurrent_gemma", "RecurrentGemma"), ("reformer", "Reformer"), ("regnet", "RegNet"), ("rembert", "RemBERT"), ("resnet", "ResNet"), ("retribert", "RetriBERT"), ("roberta", "RoBERTa"), ("roberta-prelayernorm", "RoBERTa-PreLayerNorm"), ("roc_bert", "RoCBert"), ("roformer", "RoFormer"), ("rt_detr", "RT-DETR"), ("rt_detr_resnet", "RT-DETR-ResNet"), ("rwkv", "RWKV"), ("sam", "SAM"), ("seamless_m4t", "SeamlessM4T"), ("seamless_m4t_v2", "SeamlessM4Tv2"), ("segformer", "SegFormer"), ("seggpt", "SegGPT"), ("sew", "SEW"), 
("sew-d", "SEW-D"), ("siglip", "SigLIP"), ("siglip_vision_model", "SiglipVisionModel"), ("speech-encoder-decoder", "Speech Encoder decoder"), ("speech_to_text", "Speech2Text"), ("speech_to_text_2", "Speech2Text2"), ("speecht5", "SpeechT5"), ("splinter", "Splinter"), ("squeezebert", "SqueezeBERT"), ("stablelm", "StableLm"), ("starcoder2", "Starcoder2"), ("superglue", "SuperGlue"), ("superpoint", "SuperPoint"), ("swiftformer", "SwiftFormer"), ("swin", "Swin Transformer"), ("swin2sr", "Swin2SR"), ("swinv2", "Swin Transformer V2"), ("switch_transformers", "SwitchTransformers"), ("t5", "T5"), ("t5v1.1", "T5v1.1"), ("table-transformer", "Table Transformer"), ("tapas", "TAPAS"), ("tapex", "TAPEX"), ("textnet", "TextNet"), ("time_series_transformer", "Time Series Transformer"), ("timesformer", "TimeSformer"), ("timm_backbone", "TimmBackbone"), ("timm_wrapper", "TimmWrapperModel"), ("trajectory_transformer", "Trajectory Transformer"), ("transfo-xl", "Transformer-XL"), ("trocr", "TrOCR"), ("tvlt", "TVLT"), ("tvp", "TVP"), ("udop", "UDOP"), ("ul2", "UL2"), ("umt5", "UMT5"), ("unispeech", "UniSpeech"), ("unispeech-sat", "UniSpeechSat"), ("univnet", "UnivNet"), ("upernet", "UPerNet"), ("van", "VAN"), ("video_llava", "VideoLlava"), ("videomae", "VideoMAE"), ("vilt", "ViLT"), ("vipllava", "VipLlava"), ("vision-encoder-decoder", "Vision Encoder decoder"), ("vision-text-dual-encoder", "VisionTextDualEncoder"), ("visual_bert", "VisualBERT"), ("vit", "ViT"), ("vit_hybrid", "ViT Hybrid"), ("vit_mae", "ViTMAE"), ("vit_msn", "ViTMSN"), ("vitdet", "VitDet"), ("vitmatte", "ViTMatte"), ("vitpose", "ViTPose"), ("vitpose_backbone", "ViTPoseBackbone"), ("vits", "VITS"), ("vivit", "ViViT"), ("wav2vec2", "Wav2Vec2"), ("wav2vec2-bert", "Wav2Vec2-BERT"), ("wav2vec2-conformer", "Wav2Vec2-Conformer"), ("wav2vec2_phoneme", "Wav2Vec2Phoneme"), ("wavlm", "WavLM"), ("whisper", "Whisper"), ("xclip", "X-CLIP"), ("xglm", "XGLM"), ("xlm", "XLM"), ("xlm-prophetnet", "XLM-ProphetNet"), ("xlm-roberta", "XLM-RoBERTa"), ("xlm-roberta-xl", "XLM-RoBERTa-XL"), ("xlm-v", "XLM-V"), ("xlnet", "XLNet"), ("xls_r", "XLS-R"), ("xlsr_wav2vec2", "XLSR-Wav2Vec2"), ("xmod", "X-MOD"), ("yolos", "YOLOS"), ("yoso", "YOSO"), ("zamba", "Zamba"), ("zamba2", "Zamba2"), ("zoedepth", "ZoeDepth"), ] ) # This is tied to the processing `-` -> `_` in `model_type_to_module_name`. For example, instead of putting # `transfo-xl` (as in `CONFIG_MAPPING_NAMES`), we should use `transfo_xl`. 
DEPRECATED_MODELS = [ "bort", "deta", "efficientformer", "ernie_m", "gptsan_japanese", "graphormer", "jukebox", "mctct", "mega", "mmbt", "nat", "nezha", "open_llama", "qdqbert", "realm", "retribert", "speech_to_text_2", "tapex", "trajectory_transformer", "transfo_xl", "tvlt", "van", "vit_hybrid", "xlm_prophetnet", ] SPECIAL_MODEL_TYPE_TO_MODULE_NAME = OrderedDict( [ ("openai-gpt", "openai"), ("data2vec-audio", "data2vec"), ("data2vec-text", "data2vec"), ("data2vec-vision", "data2vec"), ("donut-swin", "donut"), ("kosmos-2", "kosmos2"), ("maskformer-swin", "maskformer"), ("xclip", "x_clip"), ("clip_vision_model", "clip"), ("qwen2_audio_encoder", "qwen2_audio"), ("clip_text_model", "clip"), ("aria_text", "aria"), ("idefics3_vision", "idefics3"), ("siglip_vision_model", "siglip"), ("chinese_clip_vision_model", "chinese_clip"), ("rt_detr_resnet", "rt_detr"), ("granitevision", "llava_next"), ] ) def model_type_to_module_name(key): """Converts a config key to the corresponding module.""" # Special treatment if key in SPECIAL_MODEL_TYPE_TO_MODULE_NAME: key = SPECIAL_MODEL_TYPE_TO_MODULE_NAME[key] if key in DEPRECATED_MODELS: key = f"deprecated.{key}" return key key = key.replace("-", "_") if key in DEPRECATED_MODELS: key = f"deprecated.{key}" return key def config_class_to_model_type(config): """Converts a config class name to the corresponding model type""" for key, cls in CONFIG_MAPPING_NAMES.items(): if cls == config: return key # if key not found check in extra content for key, cls in CONFIG_MAPPING._extra_content.items(): if cls.__name__ == config: return key return None class _LazyConfigMapping(OrderedDict): """ A dictionary that lazily load its values when they are requested. """ def __init__(self, mapping): self._mapping = mapping self._extra_content = {} self._modules = {} def __getitem__(self, key): if key in self._extra_content: return self._extra_content[key] if key not in self._mapping: raise KeyError(key) value = self._mapping[key] module_name = model_type_to_module_name(key) if module_name not in self._modules: self._modules[module_name] = importlib.import_module(f".{module_name}", "transformers.models") if hasattr(self._modules[module_name], value): return getattr(self._modules[module_name], value) # Some of the mappings have entries model_type -> config of another model type. In that case we try to grab the # object at the top level. transformers_module = importlib.import_module("transformers") return getattr(transformers_module, value) def keys(self): return list(self._mapping.keys()) + list(self._extra_content.keys()) def values(self): return [self[k] for k in self._mapping.keys()] + list(self._extra_content.values()) def items(self): return [(k, self[k]) for k in self._mapping.keys()] + list(self._extra_content.items()) def __iter__(self): return iter(list(self._mapping.keys()) + list(self._extra_content.keys())) def __contains__(self, item): return item in self._mapping or item in self._extra_content def register(self, key, value, exist_ok=False): """ Register a new configuration in this mapping. """ if key in self._mapping.keys() and not exist_ok: raise ValueError(f"'{key}' is already used by a Transformers config, pick another name.") self._extra_content[key] = value CONFIG_MAPPING = _LazyConfigMapping(CONFIG_MAPPING_NAMES) class _LazyLoadAllMappings(OrderedDict): """ A mapping that will load all pairs of key values at the first access (either by indexing, requestions keys, values, etc.) Args: mapping: The mapping to load. 
""" def __init__(self, mapping): self._mapping = mapping self._initialized = False self._data = {} def _initialize(self): if self._initialized: return for model_type, map_name in self._mapping.items(): module_name = model_type_to_module_name(model_type) module = importlib.import_module(f".{module_name}", "transformers.models") mapping = getattr(module, map_name) self._data.update(mapping) self._initialized = True def __getitem__(self, key): self._initialize() return self._data[key] def keys(self): self._initialize() return self._data.keys() def values(self): self._initialize() return self._data.values() def items(self): self._initialize() return self._data.keys() def __iter__(self): self._initialize() return iter(self._data) def __contains__(self, item): self._initialize() return item in self._data def _get_class_name(model_class: Union[str, List[str]]): if isinstance(model_class, (list, tuple)): return " or ".join([f"[`{c}`]" for c in model_class if c is not None]) return f"[`{model_class}`]" def _list_model_options(indent, config_to_class=None, use_model_types=True): if config_to_class is None and not use_model_types: raise ValueError("Using `use_model_types=False` requires a `config_to_class` dictionary.") if use_model_types: if config_to_class is None: model_type_to_name = {model_type: f"[`{config}`]" for model_type, config in CONFIG_MAPPING_NAMES.items()} else: model_type_to_name = { model_type: _get_class_name(model_class) for model_type, model_class in config_to_class.items() if model_type in MODEL_NAMES_MAPPING } lines = [ f"{indent}- **{model_type}** -- {model_type_to_name[model_type]} ({MODEL_NAMES_MAPPING[model_type]} model)" for model_type in sorted(model_type_to_name.keys()) ] else: config_to_name = { CONFIG_MAPPING_NAMES[config]: _get_class_name(clas) for config, clas in config_to_class.items() if config in CONFIG_MAPPING_NAMES } config_to_model_name = { config: MODEL_NAMES_MAPPING[model_type] for model_type, config in CONFIG_MAPPING_NAMES.items() } lines = [ f"{indent}- [`{config_name}`] configuration class:" f" {config_to_name[config_name]} ({config_to_model_name[config_name]} model)" for config_name in sorted(config_to_name.keys()) ] return "\n".join(lines) def replace_list_option_in_docstrings(config_to_class=None, use_model_types=True): def docstring_decorator(fn): docstrings = fn.__doc__ if docstrings is None: # Example: -OO return fn lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^(\s*)List options\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = re.search(r"^(\s*)List options\s*$", lines[i]).groups()[0] if use_model_types: indent = f"{indent} " lines[i] = _list_model_options(indent, config_to_class=config_to_class, use_model_types=use_model_types) docstrings = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'List options' in its docstring as placeholder, current" f" docstring is:\n{docstrings}" ) fn.__doc__ = docstrings return fn return docstring_decorator class AutoConfig: r""" This is a generic configuration class that will be instantiated as one of the configuration classes of the library when created with the [`~AutoConfig.from_pretrained`] class method. This class cannot be instantiated directly using `__init__()` (throws an error). """ def __init__(self): raise EnvironmentError( "AutoConfig is designed to be instantiated " "using the `AutoConfig.from_pretrained(pretrained_model_name_or_path)` method." 
) @classmethod def for_model(cls, model_type: str, *args, **kwargs): if model_type in CONFIG_MAPPING: config_class = CONFIG_MAPPING[model_type] return config_class(*args, **kwargs) raise ValueError( f"Unrecognized model identifier: {model_type}. Should contain one of {', '.join(CONFIG_MAPPING.keys())}" ) @classmethod @replace_list_option_in_docstrings() def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate one of the configuration classes of the library from a pretrained model configuration. The configuration class to instantiate is selected based on the `model_type` property of the config object that is loaded, or when it's missing, by falling back to using pattern matching on `pretrained_model_name_or_path`: List options Args: pretrained_model_name_or_path (`str` or `os.PathLike`): Can be either: - A string, the *model id* of a pretrained model configuration hosted inside a model repo on huggingface.co. - A path to a *directory* containing a configuration file saved using the [`~PretrainedConfig.save_pretrained`] method, or the [`~PreTrainedModel.save_pretrained`] method, e.g., `./my_model_directory/`. - A path or url to a saved configuration JSON *file*, e.g., `./my_model_directory/configuration.json`. cache_dir (`str` or `os.PathLike`, *optional*): Path to a directory in which a downloaded pretrained model configuration should be cached if the standard cache should not be used. force_download (`bool`, *optional*, defaults to `False`): Whether or not to force the (re-)download the model weights and configuration files and override the cached versions if they exist. resume_download: Deprecated and ignored. All downloads are now resumed by default when possible. Will be removed in v5 of Transformers. proxies (`Dict[str, str]`, *optional*): A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request. revision (`str`, *optional*, defaults to `"main"`): The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any identifier allowed by git. return_unused_kwargs (`bool`, *optional*, defaults to `False`): If `False`, then this function returns just the final configuration object. If `True`, then this functions returns a `Tuple(config, unused_kwargs)` where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not configuration attributes: i.e., the part of `kwargs` which has not been used to update `config` and is otherwise ignored. trust_remote_code (`bool`, *optional*, defaults to `False`): Whether or not to allow for custom models defined on the Hub in their own modeling files. This option should only be set to `True` for repositories you trust and in which you have read the code, as it will execute code present on the Hub on your local machine. kwargs(additional keyword arguments, *optional*): The values in kwargs of any keys which are configuration attributes will be used to override the loaded values. Behavior concerning key/value pairs whose keys are *not* configuration attributes is controlled by the `return_unused_kwargs` keyword parameter. Examples: ```python >>> from transformers import AutoConfig >>> # Download configuration from huggingface.co and cache. 
>>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased") >>> # Download configuration from huggingface.co (user-uploaded) and cache. >>> config = AutoConfig.from_pretrained("dbmdz/bert-base-german-cased") >>> # If configuration file is in a directory (e.g., was saved using *save_pretrained('./test/saved_model/')*). >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/") >>> # Load a specific configuration file. >>> config = AutoConfig.from_pretrained("./test/bert_saved_model/my_configuration.json") >>> # Change some config attributes when loading a pretrained config. >>> config = AutoConfig.from_pretrained("google-bert/bert-base-uncased", output_attentions=True, foo=False) >>> config.output_attentions True >>> config, unused_kwargs = AutoConfig.from_pretrained( ... "google-bert/bert-base-uncased", output_attentions=True, foo=False, return_unused_kwargs=True ... ) >>> config.output_attentions True >>> unused_kwargs {'foo': False} ```""" use_auth_token = kwargs.pop("use_auth_token", None) if use_auth_token is not None: warnings.warn( "The `use_auth_token` argument is deprecated and will be removed in v5 of Transformers. Please use `token` instead.", FutureWarning, ) if kwargs.get("token", None) is not None: raise ValueError( "`token` and `use_auth_token` are both specified. Please set only the argument `token`." ) kwargs["token"] = use_auth_token kwargs["_from_auto"] = True kwargs["name_or_path"] = pretrained_model_name_or_path trust_remote_code = kwargs.pop("trust_remote_code", None) code_revision = kwargs.pop("code_revision", None) config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs) has_remote_code = "auto_map" in config_dict and "AutoConfig" in config_dict["auto_map"] has_local_code = "model_type" in config_dict and config_dict["model_type"] in CONFIG_MAPPING trust_remote_code = resolve_trust_remote_code( trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code ) if has_remote_code and trust_remote_code: class_ref = config_dict["auto_map"]["AutoConfig"] config_class = get_class_from_dynamic_module( class_ref, pretrained_model_name_or_path, code_revision=code_revision, **kwargs ) if os.path.isdir(pretrained_model_name_or_path): config_class.register_for_auto_class() return config_class.from_pretrained(pretrained_model_name_or_path, **kwargs) elif "model_type" in config_dict: try: config_class = CONFIG_MAPPING[config_dict["model_type"]] except KeyError: raise ValueError( f"The checkpoint you are trying to load has model type `{config_dict['model_type']}` " "but Transformers does not recognize this architecture. This could be because of an " "issue with the checkpoint, or because your version of Transformers is out of date.\n\n" "You can update Transformers with the command `pip install --upgrade transformers`. If this " "does not work, and the checkpoint is very new, then there may not be a release version " "that supports this model yet. In this case, you can get the most up-to-date code by installing " "Transformers from source with the command " "`pip install git+https://github.com/huggingface/transformers.git`" ) return config_class.from_dict(config_dict, **unused_kwargs) else: # Fallback: use pattern matching on the string. 
# We go from longer names to shorter names to catch roberta before bert (for instance) for pattern in sorted(CONFIG_MAPPING.keys(), key=len, reverse=True): if pattern in str(pretrained_model_name_or_path): return CONFIG_MAPPING[pattern].from_dict(config_dict, **unused_kwargs) raise ValueError( f"Unrecognized model in {pretrained_model_name_or_path}. " f"Should have a `model_type` key in its {CONFIG_NAME}, or contain one of the following strings " f"in its name: {', '.join(CONFIG_MAPPING.keys())}" ) @staticmethod def register(model_type, config, exist_ok=False): """ Register a new configuration for this class. Args: model_type (`str`): The model type like "bert" or "gpt". config ([`PretrainedConfig`]): The config to register. """ if issubclass(config, PretrainedConfig) and config.model_type != model_type: raise ValueError( "The config you are passing has a `model_type` attribute that is not consistent with the model type " f"you passed (config has {config.model_type} and you passed {model_type}. Fix one of those so they " "match!" ) CONFIG_MAPPING.register(model_type, config, exist_ok=exist_ok)
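# ---------------------------------------------------------------------------
# Illustrative sketch appended in this dump, not part of the upstream file: it
# shows how `AutoConfig.register` above wires a new model type into the lazy
# CONFIG_MAPPING so `AutoConfig.for_model` can resolve it. `my-new-model` and
# `MyNewModelConfig` are placeholder names, not real Transformers identifiers.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

    class MyNewModelConfig(PretrainedConfig):
        # `model_type` must match the key passed to `AutoConfig.register`,
        # otherwise `register` raises a ValueError.
        model_type = "my-new-model"

        def __init__(self, hidden_size=64, **kwargs):
            super().__init__(**kwargs)
            self.hidden_size = hidden_size

    # Registration stores the class in CONFIG_MAPPING._extra_content.
    AutoConfig.register("my-new-model", MyNewModelConfig)
    demo_config = AutoConfig.for_model("my-new-model", hidden_size=128)
    print(type(demo_config).__name__, demo_config.hidden_size)  # MyNewModelConfig 128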
transformers/src/transformers/models/auto/configuration_auto.py/0
{ "file_path": "transformers/src/transformers/models/auto/configuration_auto.py", "repo_id": "transformers", "token_count": 20586 }
92
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch BERT model.""" import math import os import warnings from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from packaging import version from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...generation import GenerationMixin from ...modeling_attn_mask_utils import ( _prepare_4d_attention_mask_for_sdpa, _prepare_4d_causal_attention_mask_for_sdpa, ) from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, get_torch_version, logging, replace_return_docstrings, ) from .configuration_bert import BertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google-bert/bert-base-uncased" _CONFIG_FOR_DOC = "BertConfig" # TokenClassification docstring _CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "dbmdz/bert-large-cased-finetuned-conll03-english" _TOKEN_CLASS_EXPECTED_OUTPUT = ( "['O', 'I-ORG', 'I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC'] " ) _TOKEN_CLASS_EXPECTED_LOSS = 0.01 # QuestionAnswering docstring _CHECKPOINT_FOR_QA = "deepset/bert-base-cased-squad2" _QA_EXPECTED_OUTPUT = "'a nice puppet'" _QA_EXPECTED_LOSS = 7.41 _QA_TARGET_START_INDEX = 14 _QA_TARGET_END_INDEX = 15 # SequenceClassification docstring _CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "textattack/bert-base-uncased-yelp-polarity" _SEQ_CLASS_EXPECTED_OUTPUT = "'LABEL_1'" _SEQ_CLASS_EXPECTED_LOSS = 0.01 def load_tf_weights_in_bert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except ValueError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") pointer.data = torch.from_numpy(array) return model class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer( "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False ) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the 
token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
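        # (Editor's note, added for clarity; not in the original file.) Shape reference for the
        # attention arithmetic below, writing B = batch_size, S = query length, S_k = key length,
        # H = hidden_size, N = num_attention_heads and D = H / N:
        #   hidden_states                                 -> (B, S, H)
        #   query/key/value after transpose_for_scores    -> (B, N, S, D) and (B, N, S_k, D)
        #   attention_scores and attention_probs          -> (B, N, S, S_k)
        #   context_layer after the final permute/reshape -> (B, S, H)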
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BertSdpaSelfAttention(BertSelfAttention): def __init__(self, config, position_embedding_type=None): super().__init__(config, position_embedding_type=position_embedding_type) self.dropout_prob = config.attention_probs_dropout_prob self.require_contiguous_qkv = version.parse(get_torch_version()) < version.parse("2.2.0") # Adapted from BertSelfAttention def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: if self.position_embedding_type != "absolute" or output_attentions or head_mask is not None: # TODO: Improve this warning with e.g. `model.config._attn_implementation = "manual"` once implemented. logger.warning_once( "BertSdpaSelfAttention is used but `torch.nn.functional.scaled_dot_product_attention` does not support " "non-absolute `position_embedding_type` or `output_attentions=True` or `head_mask`. Falling back to " "the manual attention implementation, but specifying the manual implementation will be required from " "Transformers version v5.0.0 onwards. This warning can be removed using the argument " '`attn_implementation="eager"` when loading the model.' ) return super().forward( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) bsz, tgt_len, _ = hidden_states.size() query_layer = self.transpose_for_scores(self.query(hidden_states)) # If this is instantiated as a cross-attention module, the keys and values come from an encoder; the attention # mask needs to be such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None current_states = encoder_hidden_states if is_cross_attention else hidden_states attention_mask = encoder_attention_mask if is_cross_attention else attention_mask # Check `seq_length` of `past_key_value` == `len(current_states)` to support prefix tuning if is_cross_attention and past_key_value and past_key_value[0].shape[2] == current_states.shape[1]: key_layer, value_layer = past_key_value else: key_layer = self.transpose_for_scores(self.key(current_states)) value_layer = self.transpose_for_scores(self.value(current_states)) if past_key_value is not None and not is_cross_attention: key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # SDPA with memory-efficient backend is broken in torch==2.1.2 when using non-contiguous inputs and a custom # attn_mask, so we need to call `.contiguous()` here. This was fixed in torch==2.2.0. # Reference: https://github.com/pytorch/pytorch/issues/112577 if self.require_contiguous_qkv and query_layer.device.type == "cuda" and attention_mask is not None: query_layer = query_layer.contiguous() key_layer = key_layer.contiguous() value_layer = value_layer.contiguous() # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling. # The tgt_len > 1 is necessary to match with AttentionMaskConverter.to_causal_4d that does not create # a causal mask in case tgt_len == 1. is_causal = ( True if self.is_decoder and not is_cross_attention and attention_mask is None and tgt_len > 1 else False ) attn_output = torch.nn.functional.scaled_dot_product_attention( query_layer, key_layer, value_layer, attn_mask=attention_mask, dropout_p=self.dropout_prob if self.training else 0.0, is_causal=is_causal, ) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, self.all_head_size) outputs = (attn_output,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states BERT_SELF_ATTENTION_CLASSES = { "eager": BertSelfAttention, "sdpa": BertSdpaSelfAttention, } class BertAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = BERT_SELF_ATTENTION_CLASSES[config._attn_implementation]( config, position_embedding_type=position_embedding_type ) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads 
self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BertAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BertAttention(config, position_embedding_type="absolute") self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise 
ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: layer_outputs = self._gradient_checkpointing_func( layer_module.__call__, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
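        # (Editor's note, not in the original file.) The actual sharing of `decoder.weight` with the
        # input word embeddings is handled by `PreTrainedModel.tie_weights` when
        # `config.tie_word_embeddings` is True, which is why this head only adds its own bias here.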
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def _tie_weights(self): self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" supports_gradient_checkpointing = True _supports_sdpa = True def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class BertForPreTrainingOutput(ModelOutput): """ Output type of [`BertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None BERT_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BERT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`or `(batch_size, sequence_length, target_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Bert Model transformer outputting raw hidden-states without any specific head on top.", BERT_START_DOCSTRING, ) class BertModel(BertPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ _no_split_modules = ["BertEmbeddings", "BertLayer"] def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) if add_pooling_layer else None self.attn_implementation = config._attn_implementation self.position_embedding_type = config.position_embedding_type # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)` or `(batch_size, sequence_length, target_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) if attention_mask is None: attention_mask = torch.ones((batch_size, seq_length + past_key_values_length), device=device) use_sdpa_attention_masks = ( self.attn_implementation == "sdpa" and self.position_embedding_type == "absolute" and head_mask is None and not output_attentions ) # Expand the attention mask if use_sdpa_attention_masks and attention_mask.dim() == 2: # Expand the attention mask for SDPA. # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len] if self.config.is_decoder: extended_attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( attention_mask, input_shape, embedding_output, past_key_values_length, ) else: extended_attention_mask = _prepare_4d_attention_mask_for_sdpa( attention_mask, embedding_output.dtype, tgt_len=seq_length ) else: # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
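            # (Editor's note, added for clarity; not in the original file.) For the eager attention path,
            # `get_extended_attention_mask` (inherited from `PreTrainedModel`) broadcasts the
            # [batch_size, seq_length] mask to [batch_size, 1, 1, seq_length] (adding a causal component
            # when the model is configured as a decoder) and converts it to an additive mask: 0.0 where
            # attention is allowed and the dtype's minimum value where it is masked, so masked positions
            # are suppressed by the softmax inside the attention layers.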
extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) if use_sdpa_attention_masks and encoder_attention_mask.dim() == 2: # Expand the attention mask for SDPA. # [bsz, seq_len] -> [bsz, 1, seq_len, seq_len] encoder_extended_attention_mask = _prepare_4d_attention_mask_for_sdpa( encoder_attention_mask, embedding_output.dtype, tgt_len=seq_length ) else: encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """ Bert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a `next sentence prediction (classification)` head. 
""", BERT_START_DOCSTRING, ) class BertForPreTraining(BertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] def __init__(self, config): super().__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, next_sentence_label: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BertForPreTrainingOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`): Used to hide legacy arguments that have been deprecated. 
Returns: Example: ```python >>> from transformers import AutoTokenizer, BertForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = BertForPreTraining.from_pretrained("google-bert/bert-base-uncased") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BertForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """Bert Model with a `language modeling` head on top for CLM fine-tuning.""", BERT_START_DOCSTRING ) class BertLMHeadModel(BertPreTrainedModel, GenerationMixin): _tied_weights_keys = ["cls.predictions.decoder.bias", "cls.predictions.decoder.weight"] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BertLMHeadModel` as a standalone, add `is_decoder=True.`") self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **loss_kwargs, ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, 
sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]` past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: lm_loss = self.loss_function(prediction_scores, labels, self.config.vocab_size, **loss_kwargs) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past), ) return reordered_past @add_start_docstrings("""Bert Model with a `language modeling` head on top.""", BERT_START_DOCSTRING) class BertForMaskedLM(BertPreTrainedModel): _tied_weights_keys = ["predictions.decoder.bias", "cls.predictions.decoder.weight"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BertForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." 
) self.bert = BertModel(config, add_pooling_layer=False) self.cls = BertOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings self.cls.predictions.bias = new_embeddings.bias @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, expected_output="'paris'", expected_loss=0.88, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """Bert Model with a `next sentence prediction (classification)` head on top.""", BERT_START_DOCSTRING, ) class BertForNextSentencePrediction(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BertModel(config) self.cls = 
BertOnlyNSPHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple[torch.Tensor], NextSentencePredictorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see `input_ids` docstring). Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Example: ```python >>> from transformers import AutoTokenizer, BertForNextSentencePrediction >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased") >>> model = BertForNextSentencePrediction.from_pretrained("google-bert/bert-base-uncased") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt") >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> logits = outputs.logits >>> assert logits[0, 0] < logits[0, 1] # next sentence was random ``` """ if "next_sentence_label" in kwargs: warnings.warn( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use" " `labels` instead.", FutureWarning, ) labels = kwargs.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_scores = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_scores.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_scores,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BERT_START_DOCSTRING, ) class BertForSequenceClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_SEQ_CLASS_EXPECTED_OUTPUT, expected_loss=_SEQ_CLASS_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", BERT_START_DOCSTRING, ) class BertForMultipleChoice(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BertModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", BERT_START_DOCSTRING, ) class BertForTokenClassification(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config, add_pooling_layer=False) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT, expected_loss=_TOKEN_CLASS_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", BERT_START_DOCSTRING, ) class BertForQuestionAnswering(BertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_QA, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, qa_target_start_index=_QA_TARGET_START_INDEX, qa_target_end_index=_QA_TARGET_END_INDEX, expected_output=_QA_EXPECTED_OUTPUT, expected_loss=_QA_EXPECTED_LOSS, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) __all__ = [ "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", "BertForPreTraining", "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", "load_tf_weights_in_bert", ]
transformers/src/transformers/models/bert/modeling_bert.py/0
{ "file_path": "transformers/src/transformers/models/bert/modeling_bert.py", "repo_id": "transformers", "token_count": 38445 }
93
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert BigBird checkpoint."""

import argparse

from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--big_bird_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BigBird model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
    )
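
# Sample usage of the CLI defined above (illustrative; the paths below are placeholders for a locally
# downloaded BigBird TensorFlow checkpoint and its config, not files shipped with this script):
#
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/bigbird/model.ckpt \
#       --big_bird_config_file /path/to/big_bird_config.json \
#       --pytorch_dump_path /path/to/pytorch_dump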
transformers/src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/big_bird/convert_bigbird_original_tf_checkpoint_to_pytorch.py", "repo_id": "transformers", "token_count": 901 }
94
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BiT checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def get_config(model_name): repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: k for k, v in id2label.items()} conv_layer = "std_conv" if "bit" in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" config = BitConfig( conv_layer=conv_layer, num_labels=1000, id2label=id2label, label2id=label2id, ) return config def rename_key(name): if "stem.conv" in name: name = name.replace("stem.conv", "bit.embedder.convolution") if "blocks" in name: name = name.replace("blocks", "layers") if "head.fc" in name: name = name.replace("head.fc", "classifier.1") if name.startswith("norm"): name = "bit." + name if "bit" not in name and "classifier" not in name: name = "bit.encoder." + name return name # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our BiT structure. 
""" # define default BiT configuration config = get_config(model_name) # load original model from timm timm_model = create_model(model_name, pretrained=True) timm_model.eval() # load state_dict of original model state_dict = timm_model.state_dict() for key in state_dict.copy().keys(): val = state_dict.pop(key) state_dict[rename_key(key)] = val.squeeze() if "head" in key else val # load HuggingFace model model = BitForImageClassification(config) model.eval() model.load_state_dict(state_dict) # create image processor transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } processor = BitImageProcessor( do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = processor(image, return_tensors="pt").pixel_values # verify pixel values assert torch.allclose(timm_pixel_values, pixel_values) # verify logits with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print("Logits:", logits[0, :3]) print("Predicted class:", model.config.id2label[logits.argmax(-1).item()]) timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model {model_name} and processor to the hub") model.push_to_hub(f"ybelkada/{model_name}") processor.push_to_hub(f"ybelkada/{model_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_name", default="resnetv2_50x1_bitm", type=str, help="Name of the BiT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model to the hub.", ) args = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
transformers/src/transformers/models/bit/convert_bit_to_pytorch.py/0
{ "file_path": "transformers/src/transformers/models/bit/convert_bit_to_pytorch.py", "repo_id": "transformers", "token_count": 2309 }
95
# coding=utf-8 # Copyright 2021 The Facebook Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization class for BlenderbotSmall.""" import json import os from typing import Dict, List, Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char pairs = set(pairs) return pairs class BlenderbotSmallTokenizer(PreTrainedTokenizer): """ Constructs a Blenderbot-90M tokenizer based on BPE (Byte-Pair-Encoding) This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to the superclass for more information regarding methods. Args: vocab_file (`str`): File containing the vocabulary. merges_file (`str`): Path to the merges file. bos_token (`str`, *optional*, defaults to `"__start__"`): The beginning of sentence token. eos_token (`str`, *optional*, defaults to `"__end__"`): The end of sentence token. unk_token (`str`, *optional*, defaults to `"__unk__"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"__null__"`): The token used for padding, for example when batching sequences of different lengths. 
kwargs (*optional*): Additional keyword arguments passed along to [`PreTrainedTokenizer`] """ vocab_files_names = VOCAB_FILES_NAMES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs, ): with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs) @property def vocab_size(self) -> int: return len(self.encoder) def get_vocab(self) -> Dict: return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token: str) -> str: if token in self.cache: return self.cache[token] token = re.sub("([.,!?()])", r" \1", token) token = re.sub("(')", r" \1 ", token) token = re.sub(r"\s{2,}", " ", token) if "\n" in token: token = token.replace("\n", " __newln__") tokens = token.split(" ") words = [] for token in tokens: if not len(token): continue token = token.lower() word = tuple(token) word = tuple(list(word[:-1]) + [word[-1] + "</w>"]) pairs = get_pairs(word) if not pairs: words.append(token) continue while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) new_word.extend(word[i:j]) i = j except ValueError: new_word.extend(word[i:]) break if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = "@@ ".join(word) word = word[:-4] self.cache[token] = word words.append(word) return " ".join(words) def _tokenize(self, text: str) -> List[str]: """Split a string into tokens using BPE.""" split_tokens = [] words = re.findall(r"\S+\n?", text) for token in words: split_tokens.extend(list(self.bpe(token).split(" "))) return split_tokens def _convert_token_to_id(self, token: str) -> int: """Converts a token to an id using the vocab.""" token = token.lower() return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index: int) -> str: """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens: List[str]) -> str: """Converts a sequence of tokens in a single string.""" out_string = " ".join(tokens).replace("@@ ", "").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with 
open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file __all__ = ["BlenderbotSmallTokenizer"]
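

# Illustrative round-trip sketch for the tokenizer defined above, runnable e.g. via
# `python -m transformers.models.blenderbot_small.tokenization_blenderbot_small`.
# The checkpoint name is an assumption: any repository providing compatible vocab.json/merges.txt
# files would behave the same way.
if __name__ == "__main__":
    tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")

    text = "Sam likes pizza!\nSo do I."
    tokens = tokenizer.tokenize(text)  # lowercased, punctuation-split BPE pieces with "@@" continuation markers
    ids = tokenizer.convert_tokens_to_ids(tokens)

    print(tokens)
    print(ids)
    print(tokenizer.convert_tokens_to_string(tokens))  # "__newln__" marks the original newline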
transformers/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py/0
{ "file_path": "transformers/src/transformers/models/blenderbot_small/tokenization_blenderbot_small.py", "repo_id": "transformers", "token_count": 3709 }
96
# Copyright 2024 Meta Inc. and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import gc import json import os import requests import torch import yaml from accelerate import init_empty_weights from PIL import Image from transformers import ( ChameleonConfig, ChameleonForConditionalGeneration, ChameleonImageProcessor, ChameleonProcessor, ) try: from transformers import LlamaTokenizerFast except ImportError: raise ValueError( "Chameleon conversion supports only FastTokenizer and LlamaTokenizerFast can't be imported! " "Update your `tokenizers` library and re-run the tokenizer conversion." ) """ Sample usage: ``` python src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py \ --input_dir /path/to/downloaded/chameleon/weights --model_size 7B --output_dir /output/path ``` Thereafter, models can be loaded via: ```py from transformers import ChameleonForConditionalGeneration, LlamaTokenizerFast model = ChameleonForConditionalGeneration.from_pretrained("/output/path") tokenizer = LlamaTokenizerFast.from_pretrained("/output/path") ``` Important note: you need to be able to host the whole model in RAM to execute this script (even if the biggest versions come in several checkpoints they each contain a part of each weight of the model, so we need to load them all in RAM). """ NUM_SHARDS = { "7B": 1, "30B": 4, } VOCAB_SIZE = 65536 def compute_intermediate_size(n, ffn_dim_multiplier=1, multiple_of=256): return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of) def read_json(path): with open(path, "r") as f: return json.load(f) def write_json(text, path): with open(path, "w") as f: json.dump(text, f) def write_model(model_path, input_base_path, model_size, chameleon_version=1): os.makedirs(model_path, exist_ok=True) input_model_path = os.path.join(input_base_path, "models", model_size.lower()) params_path = os.path.join(input_model_path, "params.json") consolidate_params_path = os.path.join(input_model_path, "consolidate_params.json") params = read_json(params_path) if os.path.isfile(consolidate_params_path): params = {**params, **read_json(consolidate_params_path)} num_shards = NUM_SHARDS[model_size] model_parallel_size = params["model_parallel_size"] params = params.get("model", params) n_layers = params["n_layers"] n_heads = params["n_heads"] n_heads_per_shard = n_heads // num_shards dim = params["dim"] dims_per_head = dim // n_heads base = params.get("rope_theta", 10000.0) swin_norm = params["swin_norm"] if base > 10000.0: max_position_embeddings = 16384 else: # Depending on the Chameleon version, the default max_position_embeddings has different values. if chameleon_version == 1: max_position_embeddings = 4096 else: raise NotImplementedError( f"Version {chameleon_version} of chameleon is not supported yet. " "Current supported versions of chameleon are [1]." 
) if params.get("n_kv_heads", None) is not None: num_key_value_heads = params["n_kv_heads"] # for GQA / MQA num_local_key_value_heads = n_heads_per_shard // num_key_value_heads key_value_dim = dim // num_key_value_heads else: # compatibility with other checkpoints num_key_value_heads = n_heads num_local_key_value_heads = n_heads_per_shard key_value_dim = dim print(f"Fetching all parameters from the checkpoint at {input_model_path}.") # Load weights if num_shards == 1: # Not sharded # (The sharded implementation would also work, but this is simpler.) loaded = None for possible_name in ["consolidated.pth", "consolidated.00.pth"]: possible_path = os.path.join(input_model_path, possible_name) if os.path.exists(possible_path): loaded = torch.load(possible_path, map_location="cpu") break assert loaded is not None else: # Sharded loaded = [ torch.load(os.path.join(input_model_path, f"consolidated.{i:02d}.pth"), map_location="cpu") for i in range(num_shards) ] # permute for sliced rotary def permute(w, n_heads, dim1=dim, dim2=dim): return w.view(n_heads, dim1 // n_heads // 2, 2, dim2).transpose(1, 2).reshape(dim1, dim2) # Load weights to the state dict state_dict = {} for layer_i in range(n_layers): if num_shards == 1: # Unsharded state_dict.update( { f"model.layers.{layer_i}.self_attn.q_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wq.weight"], n_heads=n_heads ), f"model.layers.{layer_i}.self_attn.k_proj.weight": permute( loaded[f"layers.{layer_i}.attention.wk.weight"], n_heads=num_key_value_heads, dim1=key_value_dim, ), f"model.layers.{layer_i}.self_attn.v_proj.weight": loaded[f"layers.{layer_i}.attention.wv.weight"], f"model.layers.{layer_i}.self_attn.o_proj.weight": loaded[f"layers.{layer_i}.attention.wo.weight"], f"model.layers.{layer_i}.mlp.gate_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w1.weight"], f"model.layers.{layer_i}.mlp.down_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w2.weight"], f"model.layers.{layer_i}.mlp.up_proj.weight": loaded[f"layers.{layer_i}.feed_forward.w3.weight"], f"model.layers.{layer_i}.input_layernorm.weight": loaded[ f"layers.{layer_i}.attention_norm.weight" ], f"model.layers.{layer_i}.post_attention_layernorm.weight": loaded[ f"layers.{layer_i}.ffn_norm.weight" ], } ) # qk_layernorm (see https://github.com/huggingface/transformers/pull/31534#issuecomment-2207354677) state_dict[f"model.layers.{layer_i}.self_attn.q_norm.weight"] = ( loaded[f"layers.{layer_i}.attention.q_normalization.weight"] .view(dims_per_head // 2, 2) .t() .reshape(1, -1) .repeat_interleave(n_heads, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.q_norm.bias"] = ( loaded[f"layers.{layer_i}.attention.q_normalization.bias"] .view(dims_per_head // 2, 2) .t() .reshape(1, -1) .repeat_interleave(n_heads, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.k_norm.weight"] = ( loaded[f"layers.{layer_i}.attention.k_normalization.weight"] .view(dims_per_head // 2, 2) .t() .reshape(1, -1) .repeat_interleave(num_key_value_heads, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.k_norm.bias"] = ( loaded[f"layers.{layer_i}.attention.k_normalization.bias"] .view(dims_per_head // 2, 2) .t() .reshape(1, -1) .repeat_interleave(num_key_value_heads, 0) ) else: # Sharded state_dict.update( { f"model.layers.{layer_i}.input_layernorm.weight": torch.stack( [l[f"layers.{layer_i}.attention_norm.weight"] for l in loaded] ).mean(dim=0), f"model.layers.{layer_i}.post_attention_layernorm.weight": torch.stack( [l[f"layers.{layer_i}.ffn_norm.weight"] for l in loaded] ).mean(dim=0), } ) 
state_dict[f"model.layers.{layer_i}.self_attn.q_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wq.weight"].view(n_heads_per_shard, dims_per_head, dim) for i in range(num_shards) ], dim=0, ).reshape(dim, dim), n_heads=n_heads, ) state_dict[f"model.layers.{layer_i}.self_attn.k_proj.weight"] = permute( torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wk.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim), n_heads=num_key_value_heads, dim1=key_value_dim, ) # qk_layernorm (see https://github.com/huggingface/transformers/pull/31534#issuecomment-2207354677) state_dict[f"model.layers.{layer_i}.self_attn.q_norm.weight"] = ( torch.cat([l[f"layers.{layer_i}.attention.q_normalization.weight"].unsqueeze(0) for l in loaded]) .view(num_shards, dims_per_head // 2, 2) .transpose(1, 2) .reshape(num_shards, -1) .repeat_interleave(n_heads // num_shards, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.q_norm.bias"] = ( torch.cat([l[f"layers.{layer_i}.attention.q_normalization.bias"].unsqueeze(0) for l in loaded]) .view(num_shards, dims_per_head // 2, 2) .transpose(1, 2) .reshape(num_shards, -1) .repeat_interleave(n_heads // num_shards, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.k_norm.weight"] = ( torch.cat([l[f"layers.{layer_i}.attention.k_normalization.weight"].unsqueeze(0) for l in loaded]) .view(num_shards, dims_per_head // 2, 2) .transpose(1, 2) .reshape(num_shards, -1) .repeat_interleave(num_key_value_heads // num_shards, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.k_norm.bias"] = ( torch.cat([l[f"layers.{layer_i}.attention.k_normalization.bias"].unsqueeze(0) for l in loaded]) .view(num_shards, dims_per_head // 2, 2) .transpose(1, 2) .reshape(num_shards, -1) .repeat_interleave(num_key_value_heads // num_shards, 0) ) state_dict[f"model.layers.{layer_i}.self_attn.v_proj.weight"] = torch.cat( [ loaded[i][f"layers.{layer_i}.attention.wv.weight"].view( num_local_key_value_heads, dims_per_head, dim ) for i in range(num_shards) ], dim=0, ).reshape(key_value_dim, dim) state_dict[f"model.layers.{layer_i}.self_attn.o_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.attention.wo.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.gate_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w1.weight"] for i in range(num_shards)], dim=0 ) state_dict[f"model.layers.{layer_i}.mlp.down_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w2.weight"] for i in range(num_shards)], dim=1 ) state_dict[f"model.layers.{layer_i}.mlp.up_proj.weight"] = torch.cat( [loaded[i][f"layers.{layer_i}.feed_forward.w3.weight"] for i in range(num_shards)], dim=0 ) if num_shards == 1: # Unsharded state_dict.update( { "model.embed_tokens.weight": loaded["tok_embeddings.weight"], "model.norm.weight": loaded["norm.weight"], "lm_head.weight": loaded["output.weight"], } ) else: state_dict.update( { "model.embed_tokens.weight": torch.cat( [loaded[i]["tok_embeddings.weight"] for i in range(num_shards)], dim=1 ), "model.norm.weight": torch.stack([loaded[i]["norm.weight"] for i in range(num_shards)]).mean(dim=0), "lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(num_shards)], dim=0), } ) # Load VQGAN weights vqgan_path = os.path.join(input_base_path, "tokenizer/vqgan.ckpt") vqgan_state_dict = torch.load(vqgan_path, map_location="cpu")["state_dict"] for k, v in vqgan_state_dict.items(): if "decoder" in k: continue # we dont do 
image generation yet state_dict[f"model.vqmodel.{k}"] = v # Write configs ffn_dim_multiplier = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1 multiple_of = params["multiple_of"] if "multiple_of" in params else 256 with open(os.path.join(input_base_path, "tokenizer/text_tokenizer.json")) as tokenizer_file: tokenizer_config = json.load(tokenizer_file) vocabulary_map = tokenizer_config["model"]["vocab"] vocabulary_map["<image>"] = vocabulary_map[ "<reserved08707>" ] # use a reserved token instead of adding a new one del vocabulary_map["<reserved08707>"] for token in tokenizer_config["added_tokens"]: if token["content"] == "<reserved08707>": token["content"] = "<image>" with open(os.path.join(input_base_path, "tokenizer/text_tokenizer_modified.json"), "w") as f: json.dump(tokenizer_config, f) # save the new file to init tokenizer later vq_keys_to_replace = [ ("ch", "base_channels"), ("out_ch", "out_channels"), ("n_embed", "num_embeddings"), ("ch_mult", "channel_multiplier"), ("double_z", "double_latent"), ("z_channels", "latent_channels"), ] with open(os.path.join(input_base_path, "tokenizer/vqgan.yaml")) as vqgan_cfg_file: vq_config = yaml.safe_load(vqgan_cfg_file)["model"]["params"] vq_config.update(**vq_config["ddconfig"]) for old, new in vq_keys_to_replace: vq_config[new] = vq_config[old] del vq_config["ddconfig"] del vq_config["ckpt_path"] del vq_config["lossconfig"] config = ChameleonConfig( hidden_size=dim, intermediate_size=compute_intermediate_size(dim, ffn_dim_multiplier, multiple_of), num_attention_heads=params["n_heads"], num_hidden_layers=params["n_layers"], rms_norm_eps=params["norm_eps"], num_key_value_heads=num_key_value_heads, vocab_size=VOCAB_SIZE, rope_theta=base, max_position_embeddings=max_position_embeddings, model_parallel_size=model_parallel_size, swin_norm=swin_norm, vq_config=vq_config, vocabulary_map=vocabulary_map, ) with init_empty_weights(): model = ChameleonForConditionalGeneration(config) model.load_state_dict(state_dict, assign=True, strict=False) model.save_pretrained(model_path, safe_serialization=True) # Load and save the processor tokenizer = LlamaTokenizerFast( tokenizer_file=os.path.join(input_base_path, "tokenizer/text_tokenizer_modified.json"), legacy=False ) tokenizer.sep_token_id = 8710 # assign <reserved08706> to sep so that we can append it after input text tokenizer.pad_token_id = 1 # assing <pad> to special pad_token image_processor = ChameleonImageProcessor() processor = ChameleonProcessor(image_processor=image_processor, tokenizer=tokenizer) processor.save_pretrained(model_path) # Make space so we can load the model properly now. del state_dict del loaded del vqgan_state_dict gc.collect() # Short inference on a few examples to check if generation makes sense # taken from https://github.com/facebookresearch/chameleon/blob/7a72f40aa5f462965c8374f25257f55b65b25ff4/data/prompts_for_human_evaluations.jsonl print("Loading the checkpoint in a Chameleon model...") print("*" * 100) model = ChameleonForConditionalGeneration.from_pretrained( model_path, attn_implementation="eager", torch_dtype=torch.bfloat16, device_map="auto" ) processor = ChameleonProcessor.from_pretrained(model_path) prompt = "I'm very intrigued by this work of art:<image>Please tell me about the artist." 
image = Image.open( requests.get( "https://uploads4.wikiart.org/images/paul-klee/death-for-the-idea-1915.jpg!Large.jpg", stream=True ).raw ) inputs = processor(prompt, images=image, return_tensors="pt").to(model.device, torch.bfloat16) length = inputs.input_ids.shape[1] out = model.generate(**inputs, max_new_tokens=40, do_sample=False) generated_text = processor.batch_decode(out[:, length:], skip_special_tokens=True)[0] print(f"Generation for single-image: {generated_text}") print("*" * 100) # Multi-image example prompt = "I used to know a lot about constellations when I was younger, but as I grew older, I forgot most of what I knew. These are the only two constellations that I really remember now.<image><image>I would like for you to tell me about 3 more constellations and give me a little bit of history about the constellation." image = Image.open( requests.get("https://nineplanets.org/wp-content/uploads/2020/12/the-big-dipper-1.jpg", stream=True).raw ) image_2 = Image.open( requests.get("https://www.kxan.com/wp-content/uploads/sites/40/2020/10/ORION.jpg", stream=True).raw ) inputs = processor(prompt, images=[image, image_2], return_tensors="pt").to(model.device, dtype=torch.bfloat16) length = inputs.input_ids.shape[1] out = model.generate(**inputs, max_new_tokens=50, do_sample=False) generated_text = processor.batch_decode(out[:, length:], skip_special_tokens=True)[0] print(f"Generation for multi-image: {generated_text}") def main(): parser = argparse.ArgumentParser() parser.add_argument( "--input_dir", help="Location of Chameleon weights", ) parser.add_argument( "--model_size", choices=["7B", "30B"], help="" " models correspond to the finetuned versions, and are specific to the Chameleon official release. For more details on Chameleon, checkout the original repo: https://github.com/facebookresearch/chameleon", ) parser.add_argument( "--output_dir", help="Location to write HF model", ) parser.add_argument( "--test_inference", action="store_true", help="Whether to load the model for generation to test it's converted correctly.", ) # Different Chameleon versions used different default values for max_position_embeddings, hence the need to be able to specify which version is being used. parser.add_argument( "--chameleon_version", choices=[1], default=1, type=int, help="Version of the Chameleon model to convert", ) args = parser.parse_args() write_model( model_path=args.output_dir, input_base_path=args.input_dir, model_size=args.model_size, chameleon_version=args.chameleon_version, ) if __name__ == "__main__": main()
transformers/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py/0
{ "file_path": "transformers/src/transformers/models/chameleon/convert_chameleon_weights_to_hf.py", "repo_id": "transformers", "token_count": 9652 }
97
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Audio/Text processor class for CLAP
"""

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class ClapProcessor(ProcessorMixin):
    r"""
    Constructs a CLAP processor which wraps a CLAP feature extractor and a RoBERTa tokenizer into a single processor.

    [`ClapProcessor`] offers all the functionalities of [`ClapFeatureExtractor`] and [`RobertaTokenizerFast`]. See the
    [`~ClapProcessor.__call__`] and [`~ClapProcessor.decode`] for more information.

    Args:
        feature_extractor ([`ClapFeatureExtractor`]):
            The audio processor is a required input.
        tokenizer ([`RobertaTokenizerFast`]):
            The tokenizer is a required input.
    """

    feature_extractor_class = "ClapFeatureExtractor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, text=None, audios=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequence(s) and audio(s). This method forwards the `text`
        and `kwargs` arguments to RobertaTokenizerFast's [`~RobertaTokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the audio(s), this method forwards the `audios` and `kwargs` arguments to
        ClapFeatureExtractor's [`~ClapFeatureExtractor.__call__`] if `audios` is not `None`. Please refer to the
        docstring of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            audios (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The audio or batch of audios to be prepared. Each audio can be a NumPy array or a PyTorch tensor. In
                case of a NumPy array/PyTorch tensor, each audio should be of shape (C, T), where C is a number of
                channels, and T the sample length of the audio.

            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:

                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is
              not `None`).
            - **audio_features** -- Audio features to be fed to a model. Returned when `audios` is not `None`.
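
        Example:

        A minimal sketch of pairing text with raw audio. The checkpoint name and the dummy waveform below are
        illustrative assumptions rather than values required by this processor:

        ```python
        >>> import numpy as np
        >>> from transformers import ClapProcessor

        >>> processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")
        >>> audio = np.zeros(48_000, dtype=np.float32)  # one second of silence at a 48 kHz sampling rate
        >>> inputs = processor(text=["a cat meowing"], audios=audio, sampling_rate=48_000, return_tensors="pt")
        >>> sorted(inputs.keys())  # doctest: +SKIP
        ```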
""" sampling_rate = kwargs.pop("sampling_rate", None) if text is None and audios is None: raise ValueError("You have to specify either text or audios. Both cannot be none.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if audios is not None: audio_features = self.feature_extractor( audios, sampling_rate=sampling_rate, return_tensors=return_tensors, **kwargs ) if text is not None and audios is not None: encoding.update(audio_features) return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**audio_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to RobertaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def model_input_names(self): tokenizer_input_names = self.tokenizer.model_input_names feature_extractor_input_names = self.feature_extractor.model_input_names return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names)) __all__ = ["ClapProcessor"]
transformers/src/transformers/models/clap/processing_clap.py/0
{ "file_path": "transformers/src/transformers/models/clap/processing_clap.py", "repo_id": "transformers", "token_count": 2183 }
98
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for CLIPSeg
"""

import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    r"""
    Constructs a CLIPSeg processor which wraps a CLIPSeg image processor and a CLIP tokenizer into a single processor.

    [`CLIPSegProcessor`] offers all the functionalities of [`ViTImageProcessor`] and [`CLIPTokenizerFast`]. See the
    [`~CLIPSegProcessor.__call__`] and [`~CLIPSegProcessor.decode`] for more information.

    Args:
        image_processor ([`ViTImageProcessor`], *optional*):
            The image processor is a required input.
        tokenizer ([`CLIPTokenizerFast`], *optional*):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, visual_prompt=None, return_tensors=None, **kwargs):
        """
        Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
        and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        ViTImageProcessor's [`~ViTImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            visual_prompt (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The visual prompt image or batch of images to be prepared. Each visual prompt image can be a PIL image,
                NumPy array or PyTorch tensor.
In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a number of channels, H and W are image height and width. return_tensors (`str` or [`~utils.TensorType`], *optional*): If set, will return tensors of a particular framework. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return NumPy `np.ndarray` objects. - `'jax'`: Return JAX `jnp.ndarray` objects. Returns: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`. - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not `None`). - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`. """ if text is None and visual_prompt is None and images is None: raise ValueError("You have to specify either text, visual prompt or images.") if text is not None and visual_prompt is not None: raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.") if text is not None: encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs) if visual_prompt is not None: prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs) if images is not None: image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs) if visual_prompt is not None and images is not None: encoding = { "pixel_values": image_features.pixel_values, "conditional_pixel_values": prompt_features.pixel_values, } return encoding elif text is not None and images is not None: encoding["pixel_values"] = image_features.pixel_values return encoding elif text is not None: return encoding elif visual_prompt is not None: encoding = { "conditional_pixel_values": prompt_features.pixel_values, } return encoding else: return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors) def batch_decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.batch_decode(*args, **kwargs) def decode(self, *args, **kwargs): """ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to the docstring of this method for more information. """ return self.tokenizer.decode(*args, **kwargs) @property def feature_extractor_class(self): warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, ) return self.image_processor_class @property def feature_extractor(self): warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, ) return self.image_processor __all__ = ["CLIPSegProcessor"]
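

# Illustrative end-to-end sketch for the processor defined above, runnable e.g. via
# `python -m transformers.models.clipseg.processing_clipseg`.
# The checkpoint name and image URL below are assumptions chosen for demonstration only.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import CLIPSegForImageSegmentation

    processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
    model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    # One text prompt per copy of the image; the processor batches text and pixel values together.
    prompts = ["a cat", "a remote"]
    inputs = processor(text=prompts, images=[image] * len(prompts), return_tensors="pt", padding=True)

    outputs = model(**inputs)
    print(outputs.logits.shape)  # one low-resolution segmentation map per prompt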
transformers/src/transformers/models/clipseg/processing_clipseg.py/0
{ "file_path": "transformers/src/transformers/models/clipseg/processing_clipseg.py", "repo_id": "transformers", "token_count": 3057 }
99