Dataset schema: `text` (string, length 7 to 3.71M), `id` (string, length 12 to 166), `metadata` (dict), `__index_level_0__` (int64, 0 to 658).
import type { LayoutServerLoad } from "./$types"; import { collections } from "$lib/server/database"; import type { Conversation } from "$lib/types/Conversation"; import { UrlDependency } from "$lib/types/UrlDependency"; import { defaultModel, models, oldModels, validateModel } from "$lib/server/models"; import { authCondition, requiresUser } from "$lib/server/auth"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { env } from "$env/dynamic/private"; import { ObjectId } from "mongodb"; import type { ConvSidebar } from "$lib/types/ConvSidebar"; export const load: LayoutServerLoad = async ({ locals, depends }) => { depends(UrlDependency.ConversationList); const settings = await collections.settings.findOne(authCondition(locals)); // If the active model in settings is not valid, set it to the default model. This can happen if model was disabled. if ( settings && !validateModel(models).safeParse(settings?.activeModel).success && !settings.assistants?.map((el) => el.toString())?.includes(settings?.activeModel) ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } // if the model is unlisted, set the active model to the default model if ( settings?.activeModel && models.find((m) => m.id === settings?.activeModel)?.unlisted === true ) { settings.activeModel = defaultModel.id; await collections.settings.updateOne(authCondition(locals), { $set: { activeModel: defaultModel.id }, }); } const enableAssistants = env.ENABLE_ASSISTANTS === "true"; const assistantActive = !models.map(({ id }) => id).includes(settings?.activeModel ?? ""); const assistant = assistantActive ? JSON.parse( JSON.stringify( await collections.assistants.findOne({ _id: new ObjectId(settings?.activeModel), }) ) ) : null; const conversations = await collections.conversations .find(authCondition(locals)) .sort({ updatedAt: -1 }) .project< Pick<Conversation, "title" | "model" | "_id" | "updatedAt" | "createdAt" | "assistantId"> >({ title: 1, model: 1, _id: 1, updatedAt: 1, createdAt: 1, assistantId: 1, }) .limit(300) .toArray(); const userAssistants = settings?.assistants?.map((assistantId) => assistantId.toString()) ?? []; const userAssistantsSet = new Set(userAssistants); const assistantIds = [ ...userAssistants.map((el) => new ObjectId(el)), ...(conversations.map((conv) => conv.assistantId).filter((el) => !!el) as ObjectId[]), ]; const assistants = await collections.assistants.find({ _id: { $in: assistantIds } }).toArray(); const messagesBeforeLogin = env.MESSAGES_BEFORE_LOGIN ? parseInt(env.MESSAGES_BEFORE_LOGIN) : 0; let loginRequired = false; if (requiresUser && !locals.user && messagesBeforeLogin) { if (conversations.length > messagesBeforeLogin) { loginRequired = true; } else { // get the number of messages where `from === "assistant"` across all conversations. const totalMessages = ( await collections.conversations .aggregate([ { $match: { ...authCondition(locals), "messages.from": "assistant" } }, { $project: { messages: 1 } }, { $limit: messagesBeforeLogin + 1 }, { $unwind: "$messages" }, { $match: { "messages.from": "assistant" } }, { $count: "messages" }, ]) .toArray() )[0]?.messages ?? 
0; loginRequired = totalMessages > messagesBeforeLogin; } } return { conversations: conversations.map((conv) => { if (settings?.hideEmojiOnSidebar) { conv.title = conv.title.replace(/\p{Emoji}/gu, ""); } // remove invalid unicode and trim whitespaces conv.title = conv.title.replace(/\uFFFD/gu, "").trimStart(); return { id: conv._id.toString(), title: conv.title, model: conv.model ?? defaultModel, updatedAt: conv.updatedAt, assistantId: conv.assistantId?.toString(), avatarHash: conv.assistantId && assistants.find((a) => a._id.toString() === conv.assistantId?.toString())?.avatar, }; }) satisfies ConvSidebar[], settings: { searchEnabled: !!( env.SERPAPI_KEY || env.SERPER_API_KEY || env.SERPSTACK_API_KEY || env.YDC_API_KEY || env.USE_LOCAL_WEBSEARCH || env.SEARXNG_QUERY_URL ), ethicsModalAccepted: !!settings?.ethicsModalAcceptedAt, ethicsModalAcceptedAt: settings?.ethicsModalAcceptedAt ?? null, activeModel: settings?.activeModel ?? DEFAULT_SETTINGS.activeModel, hideEmojiOnSidebar: settings?.hideEmojiOnSidebar ?? false, shareConversationsWithModelAuthors: settings?.shareConversationsWithModelAuthors ?? DEFAULT_SETTINGS.shareConversationsWithModelAuthors, customPrompts: settings?.customPrompts ?? {}, assistants: userAssistants, }, models: models.map((model) => ({ id: model.id, name: model.name, websiteUrl: model.websiteUrl, modelUrl: model.modelUrl, tokenizer: model.tokenizer, datasetName: model.datasetName, datasetUrl: model.datasetUrl, displayName: model.displayName, description: model.description, logoUrl: model.logoUrl, promptExamples: model.promptExamples, parameters: model.parameters, preprompt: model.preprompt, multimodal: model.multimodal, unlisted: model.unlisted, })), oldModels, assistants: assistants .filter((el) => userAssistantsSet.has(el._id.toString())) .map((el) => ({ ...el, _id: el._id.toString(), createdById: undefined, createdByMe: el.createdById.toString() === (locals.user?._id ?? locals.sessionId).toString(), })), user: locals.user && { id: locals.user._id.toString(), username: locals.user.username, avatarUrl: locals.user.avatarUrl, email: locals.user.email, }, assistant, enableAssistants, enableAssistantsRAG: env.ENABLE_ASSISTANTS_RAG === "true", loginRequired, loginEnabled: requiresUser, guestMode: requiresUser && messagesBeforeLogin > 0, }; };
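The trickiest part of the load function above is the guest-mode gate: it counts how many assistant replies a not-yet-logged-in session has already received by running a MongoDB aggregation over that session's conversations. As a rough standalone sketch of the same pipeline in Python (using `pymongo`; the connection string, database name, and `session_condition` filter are placeholders standing in for `authCondition(locals)`):

```python
from pymongo import MongoClient

client = MongoClient("mongodb://localhost:27017")  # placeholder connection string
conversations = client["chat-ui"]["conversations"]  # placeholder database name

session_condition = {"sessionId": "some-session-id"}  # stands in for authCondition(locals)
messages_before_login = 3

# Mirror of the aggregation in the load function: keep conversations that contain at least
# one assistant message, unwind their messages, and count only the assistant ones.
result = list(
    conversations.aggregate(
        [
            {"$match": {**session_condition, "messages.from": "assistant"}},
            {"$project": {"messages": 1}},
            {"$limit": messages_before_login + 1},
            {"$unwind": "$messages"},
            {"$match": {"messages.from": "assistant"}},
            {"$count": "messages"},
        ]
    )
)
total_messages = result[0]["messages"] if result else 0
login_required = total_messages > messages_before_login
print(login_required)
```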
chat-ui/src/routes/+layout.server.ts/0
{ "file_path": "chat-ui/src/routes/+layout.server.ts", "repo_id": "chat-ui", "token_count": 2297 }
100
import { base } from "$app/paths"; import { env } from "$env/dynamic/private"; import { Database, collections } from "$lib/server/database.js"; import { SortKey, type Assistant } from "$lib/types/Assistant"; import type { User } from "$lib/types/User"; import { generateQueryTokens } from "$lib/utils/searchTokens.js"; import { error, redirect } from "@sveltejs/kit"; import type { Filter } from "mongodb"; const NUM_PER_PAGE = 24; export const load = async ({ url, locals }) => { if (!env.ENABLE_ASSISTANTS) { throw redirect(302, `${base}/`); } const modelId = url.searchParams.get("modelId"); const pageIndex = parseInt(url.searchParams.get("p") ?? "0"); const username = url.searchParams.get("user"); const query = url.searchParams.get("q")?.trim() ?? null; const sort = url.searchParams.get("sort")?.trim() ?? SortKey.TRENDING; const createdByCurrentUser = locals.user?.username && locals.user.username === username; let user: Pick<User, "_id"> | null = null; if (username) { user = await collections.users.findOne<Pick<User, "_id">>( { username }, { projection: { _id: 1 } } ); if (!user) { throw error(404, `User "${username}" doesn't exist`); } } // if there is no user, we show community assistants, so only show featured assistants const shouldBeFeatured = env.REQUIRE_FEATURED_ASSISTANTS === "true" && !user ? { featured: true } : {}; // if the user queried is not the current user, only show "public" assistants that have been shared before const shouldHaveBeenShared = env.REQUIRE_FEATURED_ASSISTANTS === "true" && !createdByCurrentUser ? { userCount: { $gt: 1 } } : {}; // fetch the top assistants sorted by user count from biggest to smallest. filter by model too if modelId is provided or query if query is provided const filter: Filter<Assistant> = { ...(modelId && { modelId }), ...(user && { createdById: user._id }), ...(query && { searchTokens: { $all: generateQueryTokens(query) } }), ...shouldBeFeatured, ...shouldHaveBeenShared, }; const assistants = await Database.getInstance() .getCollections() .assistants.find(filter) .skip(NUM_PER_PAGE * pageIndex) .sort({ ...(sort === SortKey.TRENDING && { last24HoursCount: -1 }), userCount: -1, }) .limit(NUM_PER_PAGE) .toArray(); const numTotalItems = await Database.getInstance() .getCollections() .assistants.countDocuments(filter); return { assistants: JSON.parse(JSON.stringify(assistants)) as Array<Assistant>, selectedModel: modelId ?? "", numTotalItems, numItemsPerPage: NUM_PER_PAGE, query, sort, }; };
chat-ui/src/routes/assistants/+page.server.ts/0
{ "file_path": "chat-ui/src/routes/assistants/+page.server.ts", "repo_id": "chat-ui", "token_count": 898 }
101
import { refreshSessionCookie } from "$lib/server/auth"; import { collections } from "$lib/server/database"; import { ObjectId } from "mongodb"; import { DEFAULT_SETTINGS } from "$lib/types/Settings"; import { z } from "zod"; import type { UserinfoResponse } from "openid-client"; import { error, type Cookies } from "@sveltejs/kit"; import crypto from "crypto"; import { sha256 } from "$lib/utils/sha256"; import { addWeeks } from "date-fns"; import { OIDConfig } from "$lib/server/auth"; export async function updateUser(params: { userData: UserinfoResponse; locals: App.Locals; cookies: Cookies; userAgent?: string; ip?: string; }) { const { userData, locals, cookies, userAgent, ip } = params; // Microsoft Entra v1 tokens do not provide preferred_username, instead the username is provided in the upn // claim. See https://learn.microsoft.com/en-us/entra/identity-platform/access-token-claims-reference if (!userData.preferred_username && userData.upn) { userData.preferred_username = userData.upn as string; } const { preferred_username: username, name, email, picture: avatarUrl, sub: hfUserId, } = z .object({ preferred_username: z.string().optional(), name: z.string(), picture: z.string().optional(), sub: z.string(), email: z.string().email().optional(), }) .setKey(OIDConfig.NAME_CLAIM, z.string()) .refine((data) => data.preferred_username || data.email, { message: "Either preferred_username or email must be provided by the provider.", }) .transform((data) => ({ ...data, name: data[OIDConfig.NAME_CLAIM], })) .parse(userData) as { preferred_username?: string; email?: string; picture?: string; sub: string; name: string; } & Record<string, string>; // Dynamically access user data based on NAME_CLAIM from environment // This approach allows us to adapt to different OIDC providers flexibly. 
// check if user already exists const existingUser = await collections.users.findOne({ hfUserId }); let userId = existingUser?._id; // update session cookie on login const previousSessionId = locals.sessionId; const secretSessionId = crypto.randomUUID(); const sessionId = await sha256(secretSessionId); if (await collections.sessions.findOne({ sessionId })) { throw error(500, "Session ID collision"); } locals.sessionId = sessionId; if (existingUser) { // update existing user if any await collections.users.updateOne( { _id: existingUser._id }, { $set: { username, name, avatarUrl } } ); // remove previous session if it exists and add new one await collections.sessions.deleteOne({ sessionId: previousSessionId }); await collections.sessions.insertOne({ _id: new ObjectId(), sessionId: locals.sessionId, userId: existingUser._id, createdAt: new Date(), updatedAt: new Date(), userAgent, ip, expiresAt: addWeeks(new Date(), 2), }); } else { // user doesn't exist yet, create a new one const { insertedId } = await collections.users.insertOne({ _id: new ObjectId(), createdAt: new Date(), updatedAt: new Date(), username, name, email, avatarUrl, hfUserId, }); userId = insertedId; await collections.sessions.insertOne({ _id: new ObjectId(), sessionId: locals.sessionId, userId, createdAt: new Date(), updatedAt: new Date(), userAgent, ip, expiresAt: addWeeks(new Date(), 2), }); // move pre-existing settings to new user const { matchedCount } = await collections.settings.updateOne( { sessionId: previousSessionId }, { $set: { userId, updatedAt: new Date() }, $unset: { sessionId: "" }, } ); if (!matchedCount) { // if no settings found for user, create default settings await collections.settings.insertOne({ userId, ethicsModalAcceptedAt: new Date(), updatedAt: new Date(), createdAt: new Date(), ...DEFAULT_SETTINGS, }); } } // refresh session cookie refreshSessionCookie(cookies, secretSessionId); // migrate pre-existing conversations await collections.conversations.updateMany( { sessionId: previousSessionId }, { $set: { userId }, $unset: { sessionId: "" }, } ); }
chat-ui/src/routes/login/callback/updateUser.ts/0
{ "file_path": "chat-ui/src/routes/login/callback/updateUser.ts", "repo_id": "chat-ui", "token_count": 1472 }
102
import { base } from "$app/paths";
import { redirect } from "@sveltejs/kit";

export async function load({ parent, params }) {
	const data = await parent();

	const assistant = data.settings.assistants.find((id) => id === params.assistantId);

	if (!assistant) {
		throw redirect(302, `${base}/assistant/${params.assistantId}`);
	}

	return data;
}
chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.ts/0
{ "file_path": "chat-ui/src/routes/settings/(nav)/assistants/[assistantId]/+page.ts", "repo_id": "chat-ui", "token_count": 115 }
103
<div dir="rtl" style="direction:rtl;text-align:right;"> # مقدمة مرحبًا بك في دورة Hugging Face! ستساعدك هذه المقدمة خلال إعداد بيئة العمل. إذا كنت قد بدأت الدورة للتو، فننصحك أولاً بإلقاء نظرة على [الفصل 1](/course/chapter1)، ثم العودة وإعداد بيئتك حتى تتمكن من تجربة الكود بنفسك. تتوفر جميع المكتبات التي سنستخدمها في هذه الدورة التدريبية على شكل حزم (Package) Python، لذلك سنوضح لك هنا كيفية إعداد بيئة Python وتثبيت المكتبات المحددة التي ستحتاج إليها. سنغطي طريقتين لإعداد بيئة العمل الخاصة بك، باستخدام دفتر Colab أو بيئة Python الافتراضية. لا تتردد في اختيار البيئة التي تناسبك أكثر.نوصي المبتدئين بشدة أن يبدأوا باستخدام دفتر Colab. لاحظ أننا لن نغطي نظام Windows. إذا كنت تعمل على نظام Windows، فإننا نوصي بمتابعة استخدام دفتر Colab. إذا كنت تستخدم توزيعة Linux أو macOS، فيمكنك استخدام أي من الطريقتين الموضحتين هنا. تعتمد معظم الدورة على امتلاكك لحساب Hugging Face. نوصي بإنشاء حساب الآن: [إنشاء حساب](https://huggingface.co/join). ## استخدام دفتر Google Colab يعد استخدام دفتر Colab أبسط إعداد ممكن؛ فقط قم بتشغيل دفتر Colab في متصفحك ابدأ مباشرة بالبرمجة! إذا لم تكن معتادًا على Colab، نوصيك بالبدء باتباع [المقدمة](https://colab.research.google.com/notebooks/intro.ipynb). يتيح لك Colab استخدام بعض أجهزة التسريع، مثل GPUs أو TPUs، وهو مجاني في حال تشغيل مهمات خفيفة. بمجرد أن تشعر بالأريحية في التنقل في Colab، أنشئ دفتر ملاحظات جديدًا وابدأ في الإعداد: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/> </div> الخطوة التالية هي تثبيت المكتبات التي سنستخدمها في هذه الدورة. سنستخدم `pip` للتثبيت، وهو مدير الحزم لPython. حتى تتمكن من تثبيت مكتبة 🤗 Transformers يمكنك تشغيل أوامر النظام عن طريق تسبقها بالحرف `!` في دفتر Colab, على النحو التالي: </div> ``` !pip install transformers ``` <div dir="rtl" style="direction:rtl;text-align:right;"> يمكنك التأكد من تثبيت الحزمة بشكل صحيح عن طريق استيرادها (import) خلال وقت تشغيل Python: </div> ``` import transformers ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/> </div> <div dir="rtl" style="direction:rtl;text-align:right;"> هذا يثبت نسخة خفيفة جدا من مكتبة 🤗 Transformers. أي أنه لم يتم تثبيت أي إطارات عمل محددة للتعلم الآلي (مثل PyTorch أو TensorFlow). نوصي بتثبيت "إصدار التطوير" للمكتبة لأننا سوف نستخدم الكثير من الميزات المختلفة, و هذا الإصدار يأتي مع جميع التبعيات المطلوبة تقريباً لأي حالة استخدام يمكن تخيلها: </div> ``` !pip install transformers[sentencepiece] ``` <div dir="rtl" style="direction:rtl;text-align:right;"> سيستغرق هذا بعض الوقت، لكنك ستكون جاهزًا بعد ذلك لبقية الدورة! ## استخدام بيئة Python افتراضية إذا كنت تفضل استخدام بيئة Python الافتراضية، فإن الخطوة الأولى هي تثبيت Python على نظامك. للبدء, نوصي باتباع [دليل الإرشادات هذا](https://realpython.com/installing-python/). بمجرد تثبيت Python، يجب أن تكون قادرًا على تشغيل أوامر Python في الجهاز المستخدم. للتأكد من تثبيته بشكل صحيح قبل المتابعة إلى الخطوات التالية يمكنك البدء بتشغيل الأمر التالي: `python --version`. يجب أن يطبع هذا إصدار Python المتاح الآن على نظامك. عند تشغيل أمر Python في الجهاز المستخدم، مثل `python --version`، يجب أن تفكر في البرنامج الذي يقوم بتشغيل الأمر الخاص بك باعتباره Python "الرئيسي" على نظامك. 
نوصي بالحفاظ على هذا التثبيت الرئيسي خاليًا من أي حزم، واستخدامه لإنشاء بيئات منفصلة لكل تطبيق تعمل عليه, وبهذه الطريقة، يمكن لكل تطبيق أن يكون له تبعيات وحزم خاصة به، ولن تقلق بشأن مشكلات التوافق المحتملة مع تطبيقات أخرى. في Python، يتم ذلك باستخدام [* البيئات الافتراضية *](https://docs.python.org/3/tutorial/venv.html)، وهي عبارة عن تفرعات من المجلدات كل منها قائم بحد ذاته, ويحتوي كل منها على Python مثبت بإصدار معين بالإضافة إلى جميع الحزم التي يحتاجها التطبيق. يمكن إنشاء مثل هذه البيئة الافتراضية باستخدام عدد من الأدوات المختلفة ، لكننا سنستخدم حزمة Python الرسمية لهذا الغرض، والتي تسمى [`venv`](https://docs.python.org/3/library/venv.html#module-venv). أولاً، قم بإنشاء المجلد الذي تريد أن يتواجد فيه التطبيق الخاص بك -على سبيل المثال، قد ترغب في إنشاء مجلد جديد يسمى *transformers-course* في المجلد الرئيسي للدورة: </div> ``` mkdir ~/transformers-course cd ~/transformers-course ``` <div dir="rtl" style="direction:rtl;text-align:right;"> من داخل هذا المجلد، أنشئ بيئة افتراضية باستخدام وحدة Python `venv`: </div> ``` python -m venv .env ``` <div dir="rtl" style="direction:rtl;text-align:right;"> يجب أن يكون لديك الآن مجلد يسمى *.env* في المجلد الفارغ الخاص بك: </div> ``` ls -a ``` ```out . .. .env ``` <div dir="rtl" style="direction:rtl;text-align:right;"> يمكنك الدخول والخروج من بيئتك الافتراضية باستخدام أوامر "التنشيط" و "إلغاء التنشيط": </div> ``` # Activate the virtual environment source .env/bin/activate # Deactivate the virtual environment source .env/bin/deactivate ``` <div dir="rtl" style="direction:rtl;text-align:right;"> يمكنك التأكد من تنشيط البيئة عن طريق تشغيل الأمر `which python`: إذا كان يشير إلى البيئة الافتراضية، فقد قمت بتنشيطها بنجاح! </div> ``` which python ``` ```out /home/<user>/transformers-course/.env/bin/python ``` <div dir="rtl" style="direction:rtl;text-align:right;"> ### تثبيت التبعيات كما في القسم السابق حول استخدام مثيلات Google Colab، ستحتاج الآن إلى تثبيت الحزم المطلوبة للمتابعة. مرة أخرى، يمكنك تثبيت إصدار التطوير من 🤗 Transformers باستخدام مدير الحزم `pip`: </div> ``` pip install "transformers[sentencepiece]" ``` <div dir="rtl" style="direction:rtl;text-align:right;"> أنت الآن جاهز تمامًا للانطلاق! </div>
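Whichever setup you choose, a quick sanity check (an illustrative sketch, not part of the original chapter) is to print the installed library version from the Python runtime you will actually be using:

```python
import transformers

# If this import succeeds and prints a version string, the installation is usable
# from the current environment (Colab runtime or activated virtual environment).
print(transformers.__version__)
```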
course/chapters/ar/chapter0/1.mdx/0
{ "file_path": "course/chapters/ar/chapter0/1.mdx", "repo_id": "course", "token_count": 4362 }
104
# Bias and limitations

<CourseFloatingBanner chapter={1}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/de/chapter1/section8.ipynb"},
    {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/de/chapter1/section8.ipynb"},
]} />

If you intend to use a pretrained model or a fine-tuned version of one in production, please be aware that while these are powerful tools, they also have their limits. The biggest limitation comes from the fact that, to pretrain on large amounts of data, researchers often gather all the content they can find, taking in both the best and the worst of what the internet has to offer.

To illustrate this, let's return to the example of a `fill-mask` pipeline with the BERT model:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")
result = unmasker("This man works as a [MASK].")
print([r["token_str"] for r in result])

result = unmasker("This woman works as a [MASK].")
print([r["token_str"] for r in result])
```

```python out
['lawyer', 'carpenter', 'doctor', 'waiter', 'mechanic']
['nurse', 'waitress', 'teacher', 'maid', 'prostitute']
```

When asked to fill in the missing word in these two sentences, the model gives only one gender-neutral answer (waitress/waiter). The others are occupations usually associated with one specific gender, and yes, "prostitute" ended up in the top 5 completions the model associates with "woman" and "work". This happens even though BERT is one of the few Transformer models not built on data scraped from across the internet, but on supposedly neutral data (it was trained on the [English Wikipedia](https://huggingface.co/datasets/wikipedia) and [BookCorpus](https://huggingface.co/datasets/bookcorpus) datasets).

When you use these tools, you therefore need to keep in mind that the original model you are using could very easily generate sexist, racist, or homophobic content. Fine-tuning the model on your data will not make this inherent bias disappear.
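To see not just which completions the model prefers but how much probability it assigns to each of them, a small variation of the probe above (an illustrative sketch, not part of the original chapter) prints the scores returned by the pipeline:

```python
from transformers import pipeline

unmasker = pipeline("fill-mask", model="bert-base-uncased")

# Each prediction comes with a score: the probability the model assigns to that token.
for prediction in unmasker("This woman works as a [MASK]."):
    print(f"{prediction['token_str']}: {prediction['score']:.3f}")
```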
course/chapters/de/chapter1/8.mdx/0
{ "file_path": "course/chapters/de/chapter1/8.mdx", "repo_id": "course", "token_count": 969 }
105
- title: 0. Setup sections: - local: chapter0/1 title: Introduction - title: 1. Transformer models sections: - local: chapter1/1 title: Introduction - local: chapter1/2 title: Natural Language Processing - local: chapter1/3 title: Transformers, what can they do? - local: chapter1/4 title: How do Transformers work? - local: chapter1/5 title: Encoder models - local: chapter1/6 title: Decoder models - local: chapter1/7 title: Sequence-to-sequence models - local: chapter1/8 title: Bias and limitations - local: chapter1/9 title: Summary - local: chapter1/10 title: End-of-chapter quiz quiz: 1 - title: 2. Using 🤗 Transformers sections: - local: chapter2/1 title: Introduction - local: chapter2/2 title: Behind the pipeline - local: chapter2/3 title: Models - local: chapter2/4 title: Tokenizers - local: chapter2/5 title: Handling multiple sequences - local: chapter2/6 title: Putting it all together - local: chapter2/7 title: Basic usage completed! - local: chapter2/8 title: End-of-chapter quiz quiz: 2 - title: 3. Fine-tuning a pretrained model sections: - local: chapter3/1 title: Introduction - local: chapter3/2 title: Processing the data - local: chapter3/3 title: Fine-tuning a model with the Trainer API or Keras local_fw: { pt: chapter3/3, tf: chapter3/3_tf } - local: chapter3/4 title: A full training - local: chapter3/5 title: Fine-tuning, Check! - local: chapter3/6 title: End-of-chapter quiz quiz: 3 - title: 4. Sharing models and tokenizers sections: - local: chapter4/1 title: The Hugging Face Hub - local: chapter4/2 title: Using pretrained models - local: chapter4/3 title: Sharing pretrained models - local: chapter4/4 title: Building a model card - local: chapter4/5 title: Part 1 completed! - local: chapter4/6 title: End-of-chapter quiz quiz: 4 - title: 5. The 🤗 Datasets library sections: - local: chapter5/1 title: Introduction - local: chapter5/2 title: What if my dataset isn't on the Hub? - local: chapter5/3 title: Time to slice and dice - local: chapter5/4 title: Big data? 🤗 Datasets to the rescue! - local: chapter5/5 title: Creating your own dataset - local: chapter5/6 title: Semantic search with FAISS - local: chapter5/7 title: 🤗 Datasets, check! - local: chapter5/8 title: End-of-chapter quiz quiz: 5 - title: 6. The 🤗 Tokenizers library sections: - local: chapter6/1 title: Introduction - local: chapter6/2 title: Training a new tokenizer from an old one - local: chapter6/3 title: Fast tokenizers' special powers - local: chapter6/3b title: Fast tokenizers in the QA pipeline - local: chapter6/4 title: Normalization and pre-tokenization - local: chapter6/5 title: Byte-Pair Encoding tokenization - local: chapter6/6 title: WordPiece tokenization - local: chapter6/7 title: Unigram tokenization - local: chapter6/8 title: Building a tokenizer, block by block - local: chapter6/9 title: Tokenizers, check! - local: chapter6/10 title: End-of-chapter quiz quiz: 6 - title: 7. Main NLP tasks sections: - local: chapter7/1 title: Introduction - local: chapter7/2 title: Token classification - local: chapter7/3 title: Fine-tuning a masked language model - local: chapter7/4 title: Translation - local: chapter7/5 title: Summarization - local: chapter7/6 title: Training a causal language model from scratch - local: chapter7/7 title: Question answering - local: chapter7/8 title: Mastering NLP - local: chapter7/9 title: End-of-chapter quiz quiz: 7 - title: 8. 
How to ask for help sections: - local: chapter8/1 title: Introduction - local: chapter8/2 title: What to do when you get an error - local: chapter8/3 title: Asking for help on the forums - local: chapter8/4 title: Debugging the training pipeline local_fw: { pt: chapter8/4, tf: chapter8/4_tf } - local: chapter8/5 title: How to write a good issue - local: chapter8/6 title: Part 2 completed! - local: chapter8/7 title: End-of-chapter quiz quiz: 8 - title: 9. Building and sharing demos new: true subtitle: I trained a model, but how can I show it off? sections: - local: chapter9/1 title: Introduction to Gradio - local: chapter9/2 title: Building your first demo - local: chapter9/3 title: Understanding the Interface class - local: chapter9/4 title: Sharing demos with others - local: chapter9/5 title: Integrations with the Hugging Face Hub - local: chapter9/6 title: Advanced Interface features - local: chapter9/7 title: Introduction to Blocks - local: chapter9/8 title: Gradio, check! - local: chapter9/9 title: End-of-chapter quiz quiz: 9 - title: Course Events sections: - local: events/1 title: Live sessions and workshops - local: events/2 title: Part 2 release event - local: events/3 title: Gradio Blocks party
course/chapters/en/_toctree.yml/0
{ "file_path": "course/chapters/en/_toctree.yml", "repo_id": "course", "token_count": 1842 }
106
<FrameworkSwitchCourse {fw} /> # Handling multiple sequences[[handling-multiple-sequences]] {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_tf.ipynb"}, ]} /> {/if} {#if fw === 'pt'} <Youtube id="M6adb1j2jPI"/> {:else} <Youtube id="ROxrFOEbsQE"/> {/if} In the previous section, we explored the simplest of use cases: doing inference on a single sequence of a small length. However, some questions emerge already: - How do we handle multiple sequences? - How do we handle multiple sequences *of different lengths*? - Are vocabulary indices the only inputs that allow a model to work well? - Is there such a thing as too long a sequence? Let's see what kinds of problems these questions pose, and how we can solve them using the 🤗 Transformers API. ## Models expect a batch of inputs[[models-expect-a-batch-of-inputs]] In the previous exercise you saw how sequences get translated into lists of numbers. Let's convert this list of numbers to a tensor and send it to the model: {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor(ids) # This line will fail. model(input_ids) ``` ```python out IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant(ids) # This line will fail. model(input_ids) ``` ```py out InvalidArgumentError: Input to reshape is a tensor with 14 values, but the requested shape has 196 [Op:Reshape] ``` {/if} Oh no! Why did this fail? "We followed the steps from the pipeline in section 2. The problem is that we sent a single sequence to the model, whereas 🤗 Transformers models expect multiple sentences by default. Here we tried to do everything the tokenizer did behind the scenes when we applied it to a `sequence`. 
But if you look closely, you'll see that the tokenizer didn't just convert the list of input IDs into a tensor, it added a dimension on top of it: {#if fw === 'pt'} ```py tokenized_inputs = tokenizer(sequence, return_tensors="pt") print(tokenized_inputs["input_ids"]) ``` ```python out tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]]) ``` {:else} ```py tokenized_inputs = tokenizer(sequence, return_tensors="tf") print(tokenized_inputs["input_ids"]) ``` ```py out <tf.Tensor: shape=(1, 16), dtype=int32, numpy= array([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]], dtype=int32)> ``` {/if} Let's try again and add a new dimension: {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) ``` {/if} We print the input IDs as well as the resulting logits — here's the output: {#if fw === 'pt'} ```python out Input IDs: [[ 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]] Logits: [[-2.7276, 2.8789]] ``` {:else} ```py out Input IDs: tf.Tensor( [[ 1045 1005 2310 2042 3403 2005 1037 17662 12172 2607 2026 2878 2166 1012]], shape=(1, 14), dtype=int32) Logits: tf.Tensor([[-2.7276208 2.8789377]], shape=(1, 2), dtype=float32) ``` {/if} *Batching* is the act of sending multiple sentences through the model, all at once. If you only have one sentence, you can just build a batch with a single sequence: ``` batched_ids = [ids, ids] ``` This is a batch of two identical sequences! <Tip> ✏️ **Try it out!** Convert this `batched_ids` list into a tensor and pass it through your model. Check that you obtain the same logits as before (but twice)! </Tip> Batching allows the model to work when you feed it multiple sentences. Using multiple sequences is just as simple as building a batch with a single sequence. There's a second issue, though. When you're trying to batch together two (or more) sentences, they might be of different lengths. If you've ever worked with tensors before, you know that they need to be of rectangular shape, so you won't be able to convert the list of input IDs into a tensor directly. To work around this problem, we usually *pad* the inputs. 
## Padding the inputs[[padding-the-inputs]] The following list of lists cannot be converted to a tensor: ```py no-format batched_ids = [ [200, 200, 200], [200, 200] ] ``` In order to work around this, we'll use *padding* to make our tensors have a rectangular shape. Padding makes sure all our sentences have the same length by adding a special word called the *padding token* to the sentences with fewer values. For example, if you have 10 sentences with 10 words and 1 sentence with 20 words, padding will ensure all the sentences have 20 words. In our example, the resulting tensor looks like this: ```py no-format padding_id = 100 batched_ids = [ [200, 200, 200], [200, 200, padding_id], ] ``` The padding token ID can be found in `tokenizer.pad_token_id`. Let's use it and send our two sentences through the model individually and batched together: {#if fw === 'pt'} ```py no-format model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] print(model(torch.tensor(sequence1_ids)).logits) print(model(torch.tensor(sequence2_ids)).logits) print(model(torch.tensor(batched_ids)).logits) ``` ```python out tensor([[ 1.5694, -1.3895]], grad_fn=<AddmmBackward>) tensor([[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>) tensor([[ 1.5694, -1.3895], [ 1.3373, -1.2163]], grad_fn=<AddmmBackward>) ``` {:else} ```py no-format model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] print(model(tf.constant(sequence1_ids)).logits) print(model(tf.constant(sequence2_ids)).logits) print(model(tf.constant(batched_ids)).logits) ``` ```py out tf.Tensor([[ 1.5693678 -1.3894581]], shape=(1, 2), dtype=float32) tf.Tensor([[ 0.5803005 -0.41252428]], shape=(1, 2), dtype=float32) tf.Tensor( [[ 1.5693681 -1.3894582] [ 1.3373486 -1.2163193]], shape=(2, 2), dtype=float32) ``` {/if} There's something wrong with the logits in our batched predictions: the second row should be the same as the logits for the second sentence, but we've got completely different values! This is because the key feature of Transformer models is attention layers that *contextualize* each token. These will take into account the padding tokens since they attend to all of the tokens of a sequence. To get the same result when passing individual sentences of different lengths through the model or when passing a batch with the same sentences and padding applied, we need to tell those attention layers to ignore the padding tokens. This is done by using an attention mask. ## Attention masks[[attention-masks]] *Attention masks* are tensors with the exact same shape as the input IDs tensor, filled with 0s and 1s: 1s indicate the corresponding tokens should be attended to, and 0s indicate the corresponding tokens should not be attended to (i.e., they should be ignored by the attention layers of the model). 
Let's complete the previous example with an attention mask: {#if fw === 'pt'} ```py no-format batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] attention_mask = [ [1, 1, 1], [1, 1, 0], ] outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask)) print(outputs.logits) ``` ```python out tensor([[ 1.5694, -1.3895], [ 0.5803, -0.4125]], grad_fn=<AddmmBackward>) ``` {:else} ```py no-format batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] attention_mask = [ [1, 1, 1], [1, 1, 0], ] outputs = model(tf.constant(batched_ids), attention_mask=tf.constant(attention_mask)) print(outputs.logits) ``` ```py out tf.Tensor( [[ 1.5693681 -1.3894582 ] [ 0.5803021 -0.41252586]], shape=(2, 2), dtype=float32) ``` {/if} Now we get the same logits for the second sentence in the batch. Notice how the last value of the second sequence is a padding ID, which is a 0 value in the attention mask. <Tip> ✏️ **Try it out!** Apply the tokenization manually on the two sentences used in section 2 ("I've been waiting for a HuggingFace course my whole life." and "I hate this so much!"). Pass them through the model and check that you get the same logits as in section 2. Now batch them together using the padding token, then create the proper attention mask. Check that you obtain the same results when going through the model! </Tip> ## Longer sequences[[longer-sequences]] With Transformer models, there is a limit to the lengths of the sequences we can pass the models. Most models handle sequences of up to 512 or 1024 tokens, and will crash when asked to process longer sequences. There are two solutions to this problem: - Use a model with a longer supported sequence length. - Truncate your sequences. Models have different supported sequence lengths, and some specialize in handling very long sequences. [Longformer](https://huggingface.co/docs/transformers/model_doc/longformer) is one example, and another is [LED](https://huggingface.co/docs/transformers/model_doc/led). If you're working on a task that requires very long sequences, we recommend you take a look at those models. Otherwise, we recommend you truncate your sequences by specifying the `max_sequence_length` parameter: ```py sequence = sequence[:max_sequence_length] ```
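Putting the padding, attention mask, and truncation ideas together: in practice you rarely build these tensors by hand, because calling the tokenizer on a list of sentences handles all three at once. The following sketch (using the same checkpoint as above and PyTorch tensors as an illustration) is not part of the original chapter, but shows what that looks like:

```python
from transformers import AutoTokenizer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

sequences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "I hate this so much!",
]

# Pad to the longest sequence in the batch, truncate anything over the model's limit,
# and return PyTorch tensors together with a matching attention mask.
inputs = tokenizer(sequences, padding=True, truncation=True, max_length=512, return_tensors="pt")
print(inputs["input_ids"].shape)
print(inputs["attention_mask"])
```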
course/chapters/en/chapter2/5.mdx/0
{ "file_path": "course/chapters/en/chapter2/5.mdx", "repo_id": "course", "token_count": 4104 }
107
<FrameworkSwitchCourse {fw} /> <!-- DISABLE-FRONTMATTER-SECTIONS --> # End-of-chapter quiz[[end-of-chapter-quiz]] <CourseFloatingBanner chapter={4} classNames="absolute z-10 right-0 top-0" /> Let's test what you learned in this chapter! ### 1. What are models on the Hub limited to? <Question choices={[ { text: "Models from the 🤗 Transformers library.", explain: "While models from the 🤗 Transformers library are supported on the Hugging Face Hub, they're not the only ones!" }, { text: "All models with a similar interface to 🤗 Transformers.", explain: "No interface requirement is set when uploading models to the Hugging Face Hub. " }, { text: "There are no limits.", explain: "Right! There are no limits when uploading models to the Hub.", correct: true }, { text: "Models that are in some way related to NLP.", explain: "No requirement is set regarding the field of application!" } ]} /> ### 2. How can you manage models on the Hub? <Question choices={[ { text: "Through a GCP account.", explain: "Incorrect!" }, { text: "Through peer-to-peer distribution.", explain: "Incorrect!" }, { text: "Through git and git-lfs.", explain: "Correct! Models on the Hub are simple Git repositories, leveraging <code>git-lfs</code> for large files.", correct: true } ]} /> ### 3. What can you do using the Hugging Face Hub web interface? <Question choices={[ { text: "Fork an existing repository.", explain: "Forking a repository is not possible on the Hugging Face Hub." }, { text: "Create a new model repository.", explain: "Correct! That's not all you can do, though.", correct: true }, { text: "Manage and edit files.", explain: "Correct! That's not the only right answer, though.", correct: true }, { text: "Upload files.", explain: "Right! But that's not all.", correct: true }, { text: "See diffs across versions.", explain: "Correct! That's not all you can do, though.", correct: true } ]} /> ### 4. What is a model card? <Question choices={[ { text: "A rough description of the model, therefore less important than the model and tokenizer files.", explain: "It is indeed a description of the model, but it's an important piece: if it's incomplete or absent the model's utility is drastically reduced." }, { text: "A way to ensure reproducibility, reusability, and fairness.", explain: "Correct! Sharing the right information in the model card will help users leverage your model and be aware of its limits and biases. ", correct: true }, { text: "A Python file that can be run to retrieve information about the model.", explain: "Model cards are simple Markdown files." } ]} /> ### 5. Which of these objects of the 🤗 Transformers library can be directly shared on the Hub with `push_to_hub()`? {#if fw === 'pt'} <Question choices={[ { text: "A tokenizer", explain: "Correct! All tokenizers have the <code>push_to_hub</code> method, and using it will push all the tokenizer files (vocabulary, architecture of the tokenizer, etc.) to a given repo. That's not the only right answer, though!", correct: true }, { text: "A model configuration", explain: "Right! All model configurations have the <code>push_to_hub</code> method, and using it will push them to a given repo. What else can you share?", correct: true }, { text: "A model", explain: "Correct! All models have the <code>push_to_hub</code> method, and using it will push them and their configuration files to a given repo. 
That's not all you can share, though.", correct: true }, { text: "A Trainer", explain: "That's right — the <code>Trainer</code> also implements the <code>push_to_hub</code> method, and using it will upload the model, its configuration, the tokenizer, and a model card draft to a given repo. Try another answer!", correct: true } ]} /> {:else} <Question choices={[ { text: "A tokenizer", explain: "Correct! All tokenizers have the <code>push_to_hub</code> method, and using it will push all the tokenizer files (vocabulary, architecture of the tokenizer, etc.) to a given repo. That's not the only right answer, though!", correct: true }, { text: "A model configuration", explain: "Right! All model configurations have the <code>push_to_hub</code> method, and using it will push them to a given repo. What else can you share?", correct: true }, { text: "A model", explain: "Correct! All models have the <code>push_to_hub</code> method, and using it will push them and their configuration files to a given repo. That's not all you can share, though.", correct: true }, { text: "All of the above with a dedicated callback", explain: "That's right — the <code>PushToHubCallback</code> will regularly send all of those objects to a repo during training.", correct: true } ]} /> {/if} ### 6. What is the first step when using the `push_to_hub()` method or the CLI tools? <Question choices={[ { text: "Log in on the website.", explain: "This won't help you on your local machine." }, { text: "Run 'huggingface-cli login' in a terminal.", explain: "Correct — this will download and cache your personal token.", correct: true }, { text: "Run 'notebook_login()' in a notebook.", explain: "Correct — this will display a widget to let you authenticate.", correct: true }, ]} /> ### 7. You're using a model and a tokenizer — how can you upload them to the Hub? <Question choices={[ { text: "By calling the push_to_hub method directly on the model and the tokenizer.", explain: "Correct!", correct: true }, { text: "Within the Python runtime, by wrapping them in a <code>huggingface_hub</code> utility.", explain: "Models and tokenizers already benefit from <code>huggingface_hub</code> utilities: no need for additional wrapping!" }, { text: "By saving them to disk and calling <code>transformers-cli upload-model</code>", explain: "The command <code>upload-model</code> does not exist." } ]} /> ### 8. Which git operations can you do with the `Repository` class? <Question choices={[ { text: "A commit.", explain: "Correct, the <code>git_commit()</code> method is there for that.", correct: true }, { text: "A pull", explain: "That is the purpose of the <code>git_pull()</code> method.", correct: true }, { text: "A push", explain: "The method <code>git_push()</code> does this.", correct: true }, { text: "A merge", explain: "No, that operation will never be possible with this API." } ]} />
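Several of the questions above revolve around `push_to_hub()` and the login step. As a rough sketch of that flow (the repository name is a placeholder, and you need to authenticate first), uploading a model and its tokenizer looks like this:

```python
from huggingface_hub import notebook_login
from transformers import AutoModelForSequenceClassification, AutoTokenizer

notebook_login()  # in a terminal you would run `huggingface-cli login` instead

checkpoint = "distilbert-base-uncased"
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

# Both objects expose push_to_hub(); "my-username/my-demo-model" is a placeholder repo id.
model.push_to_hub("my-username/my-demo-model")
tokenizer.push_to_hub("my-username/my-demo-model")
```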
course/chapters/en/chapter4/6.mdx/0
{ "file_path": "course/chapters/en/chapter4/6.mdx", "repo_id": "course", "token_count": 2506 }
108
# WordPiece tokenization[[wordpiece-tokenization]] <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter6/section6.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter6/section6.ipynb"}, ]} /> WordPiece is the tokenization algorithm Google developed to pretrain BERT. It has since been reused in quite a few Transformer models based on BERT, such as DistilBERT, MobileBERT, Funnel Transformers, and MPNET. It's very similar to BPE in terms of the training, but the actual tokenization is done differently. <Youtube id="qpv6ms_t_1A"/> <Tip> 💡 This section covers WordPiece in depth, going as far as showing a full implementation. You can skip to the end if you just want a general overview of the tokenization algorithm. </Tip> ## Training algorithm[[training-algorithm]] <Tip warning={true}> ⚠️ Google never open-sourced its implementation of the training algorithm of WordPiece, so what follows is our best guess based on the published literature. It may not be 100% accurate. </Tip> Like BPE, WordPiece starts from a small vocabulary including the special tokens used by the model and the initial alphabet. Since it identifies subwords by adding a prefix (like `##` for BERT), each word is initially split by adding that prefix to all the characters inside the word. So, for instance, `"word"` gets split like this: ``` w ##o ##r ##d ``` Thus, the initial alphabet contains all the characters present at the beginning of a word and the characters present inside a word preceded by the WordPiece prefix. Then, again like BPE, WordPiece learns merge rules. The main difference is the way the pair to be merged is selected. Instead of selecting the most frequent pair, WordPiece computes a score for each pair, using the following formula: $$\mathrm{score} = (\mathrm{freq\_of\_pair}) / (\mathrm{freq\_of\_first\_element} \times \mathrm{freq\_of\_second\_element})$$ By dividing the frequency of the pair by the product of the frequencies of each of its parts, the algorithm prioritizes the merging of pairs where the individual parts are less frequent in the vocabulary. For instance, it won't necessarily merge `("un", "##able")` even if that pair occurs very frequently in the vocabulary, because the two pairs `"un"` and `"##able"` will likely each appear in a lot of other words and have a high frequency. In contrast, a pair like `("hu", "##gging")` will probably be merged faster (assuming the word "hugging" appears often in the vocabulary) since `"hu"` and `"##gging"` are likely to be less frequent individually. Let's look at the same vocabulary we used in the BPE training example: ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` The splits here will be: ``` ("h" "##u" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("h" "##u" "##g" "##s", 5) ``` so the initial vocabulary will be `["b", "h", "p", "##g", "##n", "##s", "##u"]` (if we forget about special tokens for now). The most frequent pair is `("##u", "##g")` (present 20 times), but the individual frequency of `"##u"` is very high, so its score is not the highest (it's 1 / 36). 
All pairs with a `"##u"` actually have that same score (1 / 36), so the best score goes to the pair `("##g", "##s")` -- the only one without a `"##u"` -- at 1 / 20, and the first merge learned is `("##g", "##s") -> ("##gs")`. Note that when we merge, we remove the `##` between the two tokens, so we add `"##gs"` to the vocabulary and apply the merge in the words of the corpus: ``` Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs"] Corpus: ("h" "##u" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("h" "##u" "##gs", 5) ``` At this point, `"##u"` is in all the possible pairs, so they all end up with the same score. Let's say that in this case, the first pair is merged, so `("h", "##u") -> "hu"`. This takes us to: ``` Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs", "hu"] Corpus: ("hu" "##g", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("hu" "##gs", 5) ``` Then the next best score is shared by `("hu", "##g")` and `("hu", "##gs")` (with 1/15, compared to 1/21 for all the other pairs), so the first pair with the biggest score is merged: ``` Vocabulary: ["b", "h", "p", "##g", "##n", "##s", "##u", "##gs", "hu", "hug"] Corpus: ("hug", 10), ("p" "##u" "##g", 5), ("p" "##u" "##n", 12), ("b" "##u" "##n", 4), ("hu" "##gs", 5) ``` and we continue like this until we reach the desired vocabulary size. <Tip> ✏️ **Now your turn!** What will the next merge rule be? </Tip> ## Tokenization algorithm[[tokenization-algorithm]] Tokenization differs in WordPiece and BPE in that WordPiece only saves the final vocabulary, not the merge rules learned. Starting from the word to tokenize, WordPiece finds the longest subword that is in the vocabulary, then splits on it. For instance, if we use the vocabulary learned in the example above, for the word `"hugs"` the longest subword starting from the beginning that is inside the vocabulary is `"hug"`, so we split there and get `["hug", "##s"]`. We then continue with `"##s"`, which is in the vocabulary, so the tokenization of `"hugs"` is `["hug", "##s"]`. With BPE, we would have applied the merges learned in order and tokenized this as `["hu", "##gs"]`, so the encoding is different. As another example, let's see how the word `"bugs"` would be tokenized. `"b"` is the longest subword starting at the beginning of the word that is in the vocabulary, so we split there and get `["b", "##ugs"]`. Then `"##u"` is the longest subword starting at the beginning of `"##ugs"` that is in the vocabulary, so we split there and get `["b", "##u, "##gs"]`. Finally, `"##gs"` is in the vocabulary, so this last list is the tokenization of `"bugs"`. When the tokenization gets to a stage where it's not possible to find a subword in the vocabulary, the whole word is tokenized as unknown -- so, for instance, `"mug"` would be tokenized as `["[UNK]"]`, as would `"bum"` (even if we can begin with `"b"` and `"##u"`, `"##m"` is not the vocabulary, and the resulting tokenization will just be `["[UNK]"]`, not `["b", "##u", "[UNK]"]`). This is another difference from BPE, which would only classify the individual characters not in the vocabulary as unknown. <Tip> ✏️ **Now your turn!** How will the word `"pugs"` be tokenized? </Tip> ## Implementing WordPiece[[implementing-wordpiece]] Now let's take a look at an implementation of the WordPiece algorithm. Like with BPE, this is just pedagogical, and you won't able to use this on a big corpus. 
We will use the same corpus as in the BPE example: ```python corpus = [ "This is the Hugging Face Course.", "This chapter is about tokenization.", "This section shows several tokenizer algorithms.", "Hopefully, you will be able to understand how they are trained and generate tokens.", ] ``` First, we need to pre-tokenize the corpus into words. Since we are replicating a WordPiece tokenizer (like BERT), we will use the `bert-base-cased` tokenizer for the pre-tokenization: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") ``` Then we compute the frequencies of each word in the corpus as we do the pre-tokenization: ```python from collections import defaultdict word_freqs = defaultdict(int) for text in corpus: words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text) new_words = [word for word, offset in words_with_offsets] for word in new_words: word_freqs[word] += 1 word_freqs ``` ```python out defaultdict( int, {'This': 3, 'is': 2, 'the': 1, 'Hugging': 1, 'Face': 1, 'Course': 1, '.': 4, 'chapter': 1, 'about': 1, 'tokenization': 1, 'section': 1, 'shows': 1, 'several': 1, 'tokenizer': 1, 'algorithms': 1, 'Hopefully': 1, ',': 1, 'you': 1, 'will': 1, 'be': 1, 'able': 1, 'to': 1, 'understand': 1, 'how': 1, 'they': 1, 'are': 1, 'trained': 1, 'and': 1, 'generate': 1, 'tokens': 1}) ``` As we saw before, the alphabet is the unique set composed of all the first letters of words, and all the other letters that appear in words prefixed by `##`: ```python alphabet = [] for word in word_freqs.keys(): if word[0] not in alphabet: alphabet.append(word[0]) for letter in word[1:]: if f"##{letter}" not in alphabet: alphabet.append(f"##{letter}") alphabet.sort() alphabet print(alphabet) ``` ```python out ['##a', '##b', '##c', '##d', '##e', '##f', '##g', '##h', '##i', '##k', '##l', '##m', '##n', '##o', '##p', '##r', '##s', '##t', '##u', '##v', '##w', '##y', '##z', ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'g', 'h', 'i', 's', 't', 'u', 'w', 'y'] ``` We also add the special tokens used by the model at the beginning of that vocabulary. In the case of BERT, it's the list `["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]`: ```python vocab = ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"] + alphabet.copy() ``` Next we need to split each word, with all the letters that are not the first prefixed by `##`: ```python splits = { word: [c if i == 0 else f"##{c}" for i, c in enumerate(word)] for word in word_freqs.keys() } ``` Now that we are ready for training, let's write a function that computes the score of each pair. 
We'll need to use this at each step of the training: ```python def compute_pair_scores(splits): letter_freqs = defaultdict(int) pair_freqs = defaultdict(int) for word, freq in word_freqs.items(): split = splits[word] if len(split) == 1: letter_freqs[split[0]] += freq continue for i in range(len(split) - 1): pair = (split[i], split[i + 1]) letter_freqs[split[i]] += freq pair_freqs[pair] += freq letter_freqs[split[-1]] += freq scores = { pair: freq / (letter_freqs[pair[0]] * letter_freqs[pair[1]]) for pair, freq in pair_freqs.items() } return scores ``` Let's have a look at a part of this dictionary after the initial splits: ```python pair_scores = compute_pair_scores(splits) for i, key in enumerate(pair_scores.keys()): print(f"{key}: {pair_scores[key]}") if i >= 5: break ``` ```python out ('T', '##h'): 0.125 ('##h', '##i'): 0.03409090909090909 ('##i', '##s'): 0.02727272727272727 ('i', '##s'): 0.1 ('t', '##h'): 0.03571428571428571 ('##h', '##e'): 0.011904761904761904 ``` Now, finding the pair with the best score only takes a quick loop: ```python best_pair = "" max_score = None for pair, score in pair_scores.items(): if max_score is None or max_score < score: best_pair = pair max_score = score print(best_pair, max_score) ``` ```python out ('a', '##b') 0.2 ``` So the first merge to learn is `('a', '##b') -> 'ab'`, and we add `'ab'` to the vocabulary: ```python vocab.append("ab") ``` To continue, we need to apply that merge in our `splits` dictionary. Let's write another function for this: ```python def merge_pair(a, b, splits): for word in word_freqs: split = splits[word] if len(split) == 1: continue i = 0 while i < len(split) - 1: if split[i] == a and split[i + 1] == b: merge = a + b[2:] if b.startswith("##") else a + b split = split[:i] + [merge] + split[i + 2 :] else: i += 1 splits[word] = split return splits ``` And we can have a look at the result of the first merge: ```py splits = merge_pair("a", "##b", splits) splits["about"] ``` ```python out ['ab', '##o', '##u', '##t'] ``` Now we have everything we need to loop until we have learned all the merges we want. Let's aim for a vocab size of 70: ```python vocab_size = 70 while len(vocab) < vocab_size: scores = compute_pair_scores(splits) best_pair, max_score = "", None for pair, score in scores.items(): if max_score is None or max_score < score: best_pair = pair max_score = score splits = merge_pair(*best_pair, splits) new_token = ( best_pair[0] + best_pair[1][2:] if best_pair[1].startswith("##") else best_pair[0] + best_pair[1] ) vocab.append(new_token) ``` We can then look at the generated vocabulary: ```py print(vocab) ``` ```python out ['[PAD]', '[UNK]', '[CLS]', '[SEP]', '[MASK]', '##a', '##b', '##c', '##d', '##e', '##f', '##g', '##h', '##i', '##k', '##l', '##m', '##n', '##o', '##p', '##r', '##s', '##t', '##u', '##v', '##w', '##y', '##z', ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'g', 'h', 'i', 's', 't', 'u', 'w', 'y', 'ab', '##fu', 'Fa', 'Fac', '##ct', '##ful', '##full', '##fully', 'Th', 'ch', '##hm', 'cha', 'chap', 'chapt', '##thm', 'Hu', 'Hug', 'Hugg', 'sh', 'th', 'is', '##thms', '##za', '##zat', '##ut'] ``` As we can see, compared to BPE, this tokenizer learns parts of words as tokens a bit faster. <Tip> 💡 Using `train_new_from_iterator()` on the same corpus won't result in the exact same vocabulary. This is because the 🤗 Tokenizers library does not implement WordPiece for the training (since we are not completely sure of its internals), but uses BPE instead. 
</Tip> To tokenize a new text, we pre-tokenize it, split it, then apply the tokenization algorithm on each word. That is, we look for the biggest subword starting at the beginning of the first word and split it, then we repeat the process on the second part, and so on for the rest of that word and the following words in the text: ```python def encode_word(word): tokens = [] while len(word) > 0: i = len(word) while i > 0 and word[:i] not in vocab: i -= 1 if i == 0: return ["[UNK]"] tokens.append(word[:i]) word = word[i:] if len(word) > 0: word = f"##{word}" return tokens ``` Let's test it on one word that's in the vocabulary, and another that isn't: ```python print(encode_word("Hugging")) print(encode_word("HOgging")) ``` ```python out ['Hugg', '##i', '##n', '##g'] ['[UNK]'] ``` Now, let's write a function that tokenizes a text: ```python def tokenize(text): pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text) pre_tokenized_text = [word for word, offset in pre_tokenize_result] encoded_words = [encode_word(word) for word in pre_tokenized_text] return sum(encoded_words, []) ``` We can try it on any text: ```python tokenize("This is the Hugging Face course!") ``` ```python out ['Th', '##i', '##s', 'is', 'th', '##e', 'Hugg', '##i', '##n', '##g', 'Fac', '##e', 'c', '##o', '##u', '##r', '##s', '##e', '[UNK]'] ``` That's it for the WordPiece algorithm! Now let's take a look at Unigram.
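As an optional aside (this comparison is not part of the original section), before switching to Unigram you can put our `tokenize()` function side by side with the pretrained `bert-base-cased` tokenizer on the same corpus. The vocabularies are very different (ours has only 70 tokens), so the splits won't match exactly; the point is just to eyeball the behavior:

```python
for text in corpus:
    print(tokenize(text))
    print(tokenizer.tokenize(text))
    print()
```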
course/chapters/en/chapter6/6.mdx/0
{ "file_path": "course/chapters/en/chapter6/6.mdx", "repo_id": "course", "token_count": 5588 }
109
<FrameworkSwitchCourse {fw} /> # Debugging the training pipeline[[debugging-the-training-pipeline]] <CourseFloatingBanner chapter={8} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter8/section4.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter8/section4.ipynb"}, ]} /> You've written a beautiful script to train or fine-tune a model on a given task, dutifully following the advice from [Chapter 7](/course/chapter7). But when you launch the command `trainer.train()`, something horrible happens: you get an error 😱! Or worse, everything seems to be fine and the training runs without error, but the resulting model is crappy. In this section, we will show you what you can do to debug these kinds of issues. ## Debugging the training pipeline[[debugging-the-training-pipeline]] <Youtube id="L-WSwUWde1U"/> The problem when you encounter an error in `trainer.train()` is that it could come from multiple sources, as the `Trainer` usually puts together lots of things. It converts datasets to dataloaders, so the problem could be something wrong in your dataset, or some issue when trying to batch elements of the datasets together. Then it takes a batch of data and feeds it to the model, so the problem could be in the model code. After that, it computes the gradients and performs the optimization step, so the problem could also be in your optimizer. And even if everything goes well for training, something could still go wrong during the evaluation if there is a problem with your metric. The best way to debug an error that arises in `trainer.train()` is to manually go through this whole pipeline to see where things went awry. The error is then often very easy to solve. To demonstrate this, we will use the following script that (tries to) fine-tune a DistilBERT model on the [MNLI dataset](https://huggingface.co/datasets/glue): ```py from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint) args = TrainingArguments( f"distilbert-finetuned-mnli", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) metric = evaluate.load("glue", "mnli") def compute_metrics(eval_pred): predictions, labels = eval_pred return metric.compute(predictions=predictions, references=labels) trainer = Trainer( model, args, train_dataset=raw_datasets["train"], eval_dataset=raw_datasets["validation_matched"], compute_metrics=compute_metrics, ) trainer.train() ``` If you try to execute it, you will be met with a rather cryptic error: ```python out 'ValueError: You have to specify either input_ids or inputs_embeds' ``` ### Check your data[[check-your-data]] This goes without saying, but if your data is corrupted, the `Trainer` is not going to be able to form batches, let alone train your model. 
So first things first, you need to have a look at what is inside your training set. To avoid countless hours spent trying to fix something that is not the source of the bug, we recommend you use `trainer.train_dataset` for your checks and nothing else. So let's do that here: ```py trainer.train_dataset[0] ``` ```python out {'hypothesis': 'Product and geography are what make cream skimming work. ', 'idx': 0, 'label': 1, 'premise': 'Conceptually cream skimming has two basic dimensions - product and geography.'} ``` Do you notice something wrong? This, in conjunction with the error message about `input_ids` missing, should make you realize those are texts, not numbers the model can make sense of. Here, the original error is very misleading because the `Trainer` automatically removes the columns that don't match the model signature (that is, the arguments expected by the model). That means here, everything apart from the labels was discarded. There was thus no issue with creating batches and then sending them to the model, which in turn complained it didn't receive the proper input. Why wasn't the data processed? We did use the `Dataset.map()` method on the datasets to apply the tokenizer on each sample. But if you look closely at the code, you will see that we made a mistake when passing the training and evaluation sets to the `Trainer`. Instead of using `tokenized_datasets` here, we used `raw_datasets` 🤦. So let's fix this! ```py from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint) args = TrainingArguments( f"distilbert-finetuned-mnli", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) metric = evaluate.load("glue", "mnli") def compute_metrics(eval_pred): predictions, labels = eval_pred return metric.compute(predictions=predictions, references=labels) trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation_matched"], compute_metrics=compute_metrics, ) trainer.train() ``` This new code will now give a different error (progress!): ```python out 'ValueError: expected sequence of length 43 at dim 1 (got 37)' ``` Looking at the traceback, we can see the error happens in the data collation step: ```python out ~/git/transformers/src/transformers/data/data_collator.py in torch_default_data_collator(features) 105 batch[k] = torch.stack([f[k] for f in features]) 106 else: --> 107 batch[k] = torch.tensor([f[k] for f in features]) 108 109 return batch ``` So, we should move to that. Before we do, however, let's finish inspecting our data, just to be 100% sure it's correct. One thing you should always do when debugging a training session is have a look at the decoded inputs of your model. We can't make sense of the numbers that we feed it directly, so we should look at what those numbers represent. 
In computer vision, for example, that means looking at the decoded pictures of the pixels you pass, in speech it means listening to the decoded audio samples, and for our NLP example here it means using our tokenizer to decode the inputs: ```py tokenizer.decode(trainer.train_dataset[0]["input_ids"]) ``` ```python out '[CLS] conceptually cream skimming has two basic dimensions - product and geography. [SEP] product and geography are what make cream skimming work. [SEP]' ``` So that seems correct. You should do this for all the keys in the inputs: ```py trainer.train_dataset[0].keys() ``` ```python out dict_keys(['attention_mask', 'hypothesis', 'idx', 'input_ids', 'label', 'premise']) ``` Note that the keys that don't correspond to inputs accepted by the model will be automatically discarded, so here we will only keep `input_ids`, `attention_mask`, and `label` (which will be renamed `labels`). To double-check the model signature, you can print the class of your model, then go check its documentation: ```py type(trainer.model) ``` ```python out transformers.models.distilbert.modeling_distilbert.DistilBertForSequenceClassification ``` So in our case, we can check the parameters accepted on [this page](https://huggingface.co/transformers/model_doc/distilbert.html#distilbertforsequenceclassification). The `Trainer` will also log the columns it's discarding. We have checked that the input IDs are correct by decoding them. Next is the `attention_mask`: ```py trainer.train_dataset[0]["attention_mask"] ``` ```python out [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1] ``` Since we didn't apply padding in our preprocessing, this seems perfectly natural. To be sure there is no issue with that attention mask, let's check it is the same length as our input IDs: ```py len(trainer.train_dataset[0]["attention_mask"]) == len( trainer.train_dataset[0]["input_ids"] ) ``` ```python out True ``` That's good! Lastly, let's check our label: ```py trainer.train_dataset[0]["label"] ``` ```python out 1 ``` Like the input IDs, this is a number that doesn't really make sense on its own. As we saw before, the map between integers and label names is stored inside the `names` attribute of the corresponding *feature* of the dataset: ```py trainer.train_dataset.features["label"].names ``` ```python out ['entailment', 'neutral', 'contradiction'] ``` So `1` means `neutral`, which means the two sentences we saw above are not in contradiction, and the first one does not imply the second one. That seems correct! We don't have token type IDs here, since DistilBERT does not expect them; if you have some in your model, you should also make sure that they properly match where the first and second sentences are in the input. <Tip> ✏️ **Your turn!** Check that everything seems correct with the second element of the training dataset. </Tip> We are only doing the check on the training set here, but you should of course double-check the validation and test sets the same way. Now that we know our datasets look good, it's time to check the next step of the training pipeline. ### From datasets to dataloaders[[from-datasets-to-dataloaders]] The next thing that can go wrong in the training pipeline is when the `Trainer` tries to form batches from the training or validation set. 
Once you are sure the `Trainer`'s datasets are correct, you can try to manually form a batch by executing the following (replace `train` with `eval` for the validation dataloader): ```py for batch in trainer.get_train_dataloader(): break ``` This code creates the training dataloader, then iterates through it, stopping at the first iteration. If the code executes without error, you have the first training batch that you can inspect, and if the code errors out, you know for sure the problem is in the dataloader, as is the case here: ```python out ~/git/transformers/src/transformers/data/data_collator.py in torch_default_data_collator(features) 105 batch[k] = torch.stack([f[k] for f in features]) 106 else: --> 107 batch[k] = torch.tensor([f[k] for f in features]) 108 109 return batch ValueError: expected sequence of length 45 at dim 1 (got 76) ``` Inspecting the last frame of the traceback should be enough to give you a clue, but let's do a bit more digging. Most of the problems during batch creation arise because of the collation of examples into a single batch, so the first thing to check when in doubt is what `collate_fn` your `DataLoader` is using: ```py data_collator = trainer.get_train_dataloader().collate_fn data_collator ``` ```python out <function transformers.data.data_collator.default_data_collator(features: List[InputDataClass], return_tensors='pt') -> Dict[str, Any]> ``` So this is the `default_data_collator`, but that's not what we want in this case. We want to pad our examples to the longest sentence in the batch, which is done by the `DataCollatorWithPadding` collator. And this data collator is supposed to be used by default by the `Trainer`, so why is it not used here? The answer is because we did not pass the `tokenizer` to the `Trainer`, so it couldn't create the `DataCollatorWithPadding` we want. In practice, you should never hesitate to explicitly pass along the data collator you want to use, to make sure you avoid these kinds of errors. Let's adapt our code to do exactly that: ```py from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, DataCollatorWithPadding, TrainingArguments, Trainer, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint) args = TrainingArguments( f"distilbert-finetuned-mnli", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) metric = evaluate.load("glue", "mnli") def compute_metrics(eval_pred): predictions, labels = eval_pred return metric.compute(predictions=predictions, references=labels) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation_matched"], compute_metrics=compute_metrics, data_collator=data_collator, tokenizer=tokenizer, ) trainer.train() ``` The good news? We don't get the same error as before, which is definitely progress. The bad news? 
We get an infamous CUDA error instead: ```python out RuntimeError: CUDA error: CUBLAS_STATUS_ALLOC_FAILED when calling `cublasCreate(handle)` ``` This is bad because CUDA errors are extremely hard to debug in general. We will see in a minute how to solve this, but first let's finish our analysis of batch creation. If you are sure your data collator is the right one, you should try to apply it on a couple of samples of your dataset: ```py data_collator = trainer.get_train_dataloader().collate_fn batch = data_collator([trainer.train_dataset[i] for i in range(4)]) ``` This code will fail because the `train_dataset` contains string columns, which the `Trainer` usually removes. You can remove them manually, or if you want to replicate exactly what the `Trainer` is doing behind the scenes, you can call the private `Trainer._remove_unused_columns()` method that does that: ```py data_collator = trainer.get_train_dataloader().collate_fn actual_train_set = trainer._remove_unused_columns(trainer.train_dataset) batch = data_collator([actual_train_set[i] for i in range(4)]) ``` You should then be able to manually debug what happens inside the data collator if the error persists. Now that we've debugged the batch creation process, it's time to pass one through the model! ### Going through the model[[going-through-the-model]] You should be able to get a batch by executing the following command: ```py for batch in trainer.get_train_dataloader(): break ``` If you're running this code in a notebook, you may get a CUDA error that's similar to the one we saw earlier, in which case you need to restart your notebook and reexecute the last snippet without the `trainer.train()` line. That's the second most annoying thing about CUDA errors: they irremediably break your kernel. The most annoying thing about them is the fact that they are hard to debug. Why is that? It has to do with the way GPUs work. They are extremely efficient at executing a lot of operations in parallel, but the drawback is that when one of those instructions results in an error, you don't know it instantly. It's only when the program calls a synchronization of the multiple processes on the GPU that it will realize something went wrong, so the error is actually raised at a place that has nothing to do with what created it. For instance, if we look at our previous traceback, the error was raised during the backward pass, but we will see in a minute that it actually stems from something in the forward pass. So how do we debug those errors? The answer is easy: we don't. Unless your CUDA error is an out-of-memory error (which means there is not enough memory in your GPU), you should always go back to the CPU to debug it. To do this in our case, we just have to put the model back on the CPU and call it on our batch -- the batch returned by the `DataLoader` has not been moved to the GPU yet: ```python outputs = trainer.model.cpu()(**batch) ``` ```python out ~/.pyenv/versions/3.7.9/envs/base/lib/python3.7/site-packages/torch/nn/functional.py in nll_loss(input, target, weight, size_average, ignore_index, reduce, reduction) 2386 ) 2387 if dim == 2: -> 2388 ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index) 2389 elif dim == 4: 2390 ret = torch._C._nn.nll_loss2d(input, target, weight, _Reduction.get_enum(reduction), ignore_index) IndexError: Target 2 is out of bounds. ``` So, the picture is getting clearer. 
Instead of having a CUDA error, we now have an `IndexError` in the loss computation (so nothing to do with the backward pass, as we said earlier). More precisely, we can see that it's target 2 that creates the error, so this is a very good moment to check the number of labels of our model: ```python trainer.model.config.num_labels ``` ```python out 2 ``` With two labels, only 0s and 1s are allowed as targets, but according to the error message we got a 2. Getting a 2 is actually normal: if we remember the label names we extracted earlier, there were three, so we have indices 0, 1, and 2 in our dataset. The problem is that we didn't tell that to our model, which should have been created with three labels. So let's fix that! ```py from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, DataCollatorWithPadding, TrainingArguments, Trainer, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3) args = TrainingArguments( f"distilbert-finetuned-mnli", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) metric = evaluate.load("glue", "mnli") def compute_metrics(eval_pred): predictions, labels = eval_pred return metric.compute(predictions=predictions, references=labels) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation_matched"], compute_metrics=compute_metrics, data_collator=data_collator, tokenizer=tokenizer, ) ``` We aren't including the `trainer.train()` line yet, to take the time to check that everything looks good. If we request a batch and pass it to our model, it now works without error! ```py for batch in trainer.get_train_dataloader(): break outputs = trainer.model.cpu()(**batch) ``` The next step is then to move back to the GPU and check that everything still works: ```py import torch device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") batch = {k: v.to(device) for k, v in batch.items()} outputs = trainer.model.to(device)(**batch) ``` If you still get an error, make sure you restart your notebook and only execute the last version of the script. ### Performing one optimization step[[performing-one-optimization-step]] Now that we know that we can build batches that actually go through the model, we are ready for the next step of the training pipeline: computing the gradients and performing an optimization step. The first part is just a matter of calling the `backward()` method on the loss: ```py loss = outputs.loss loss.backward() ``` It's pretty rare to get an error at this stage, but if you do get one, make sure to go back to the CPU to get a helpful error message. To perform the optimization step, we just need to create the `optimizer` and call its `step()` method: ```py trainer.create_optimizer() trainer.optimizer.step() ``` Again, if you're using the default optimizer in the `Trainer`, you shouldn't get an error at this stage, but if you have a custom optimizer, there might be some problems to debug here. 
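If you do run into trouble with a custom optimizer, one useful reflex (shown here as an illustration rather than as part of the original script; the `AdamW` optimizer and learning rate below are stand-ins for whatever you are actually using) is to rebuild the optimizer by hand and run a single step on the CPU, where the stack traces are readable:

```py
import torch

# Move the model and the batch back to the CPU so errors give a readable traceback
cpu_batch = {k: v.cpu() for k, v in batch.items()}
outputs = trainer.model.cpu()(**cpu_batch)
outputs.loss.backward()

# Stand-in for your custom optimizer -- replace AdamW with your own class
optimizer = torch.optim.AdamW(trainer.model.parameters(), lr=2e-5)
optimizer.step()
optimizer.zero_grad()
```

If this runs fine on the CPU but fails on the GPU, the problem is more likely in how tensors are moved across devices than in the optimizer itself.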
Don't forget to go back to the CPU if you get a weird CUDA error at this stage. Speaking of CUDA errors, earlier we mentioned a special case. Let's have a look at that now.

### Dealing with CUDA out-of-memory errors[[dealing-with-cuda-out-of-memory-errors]]

Whenever you get an error message that starts with `RuntimeError: CUDA out of memory`, this indicates that you are out of GPU memory. This is not directly linked to your code, and it can happen with a script that runs perfectly fine. This error means that you tried to put too many things in the internal memory of your GPU, and that resulted in an error. Like with other CUDA errors, you will need to restart your kernel to be in a spot where you can run your training again.

To solve this issue, you just need to use less GPU space -- something that is often easier said than done. First, make sure you don't have two models on the GPU at the same time (unless that's required for your problem, of course). Then, you should probably reduce your batch size, as it directly affects the sizes of all the intermediate outputs of the model and their gradients. If the problem persists, consider using a smaller version of your model.

<Tip>

In the next part of the course, we'll look at more advanced techniques that can help you reduce your memory footprint and let you fine-tune the biggest models.

</Tip>

### Evaluating the model[[evaluating-the-model]]

Now that we've solved all the issues with our code, everything is perfect and the training should run smoothly, right? Not so fast! If you run the `trainer.train()` command, everything will look good at first, but after a while you will get the following:

```py
# This will take a long time and error out, so you shouldn't run this cell
trainer.train()
```

```python out
TypeError: only size-1 arrays can be converted to Python scalars
```

You will realize this error appears during the evaluation phase, so this is the last thing we will need to debug. You can run the evaluation loop of the `Trainer` independently from the training like this:

```py
trainer.evaluate()
```

```python out
TypeError: only size-1 arrays can be converted to Python scalars
```

<Tip>

💡 You should always make sure you can run `trainer.evaluate()` before launching `trainer.train()`, to avoid wasting lots of compute resources before hitting an error.

</Tip>

Before attempting to debug a problem in the evaluation loop, you should first make sure that you've had a look at the data, are able to form a batch properly, and can run your model on it. We've completed all of those steps, so the following code can be executed without error:

```py
for batch in trainer.get_eval_dataloader():
    break

batch = {k: v.to(device) for k, v in batch.items()}

with torch.no_grad():
    outputs = trainer.model(**batch)
```

The error comes later, at the end of the evaluation phase, and if we look at the traceback we see this:

```python trace
~/git/datasets/src/datasets/metric.py in add_batch(self, predictions, references)
    431         """
    432         batch = {"predictions": predictions, "references": references}
--> 433         batch = self.info.features.encode_batch(batch)
    434         if self.writer is None:
    435             self._init_writer()
```

This tells us that the error originates in the `datasets/metric.py` module -- so this is a problem with our `compute_metrics()` function.
It takes a tuple with the logits and the labels as NumPy arrays, so let's try to feed it that: ```py predictions = outputs.logits.cpu().numpy() labels = batch["labels"].cpu().numpy() compute_metrics((predictions, labels)) ``` ```python out TypeError: only size-1 arrays can be converted to Python scalars ``` We get the same error, so the problem definitely lies with that function. If we look back at its code, we see it's just forwarding the `predictions` and the `labels` to `metric.compute()`. So is there a problem with that method? Not really. Let's have a quick look at the shapes: ```py predictions.shape, labels.shape ``` ```python out ((8, 3), (8,)) ``` Our predictions are still logits, not the actual predictions, which is why the metric is returning this (somewhat obscure) error. The fix is pretty easy; we just have to add an argmax in the `compute_metrics()` function: ```py import numpy as np def compute_metrics(eval_pred): predictions, labels = eval_pred predictions = np.argmax(predictions, axis=1) return metric.compute(predictions=predictions, references=labels) compute_metrics((predictions, labels)) ``` ```python out {'accuracy': 0.625} ``` Now our error is fixed! This was the last one, so our script will now train a model properly. For reference, here is the completely fixed script: ```py import numpy as np from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, AutoModelForSequenceClassification, DataCollatorWithPadding, TrainingArguments, Trainer, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3) args = TrainingArguments( f"distilbert-finetuned-mnli", evaluation_strategy="epoch", save_strategy="epoch", learning_rate=2e-5, num_train_epochs=3, weight_decay=0.01, ) metric = evaluate.load("glue", "mnli") def compute_metrics(eval_pred): predictions, labels = eval_pred predictions = np.argmax(predictions, axis=1) return metric.compute(predictions=predictions, references=labels) data_collator = DataCollatorWithPadding(tokenizer=tokenizer) trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation_matched"], compute_metrics=compute_metrics, data_collator=data_collator, tokenizer=tokenizer, ) trainer.train() ``` In this instance, there are no more problems, and our script will fine-tune a model that should give reasonable results. But what can we do when the training proceeds without any error, and the model trained does not perform well at all? That's the hardest part of machine learning, and we'll show you a few techniques that can help. <Tip> 💡 If you're using a manual training loop, the same steps apply to debug your training pipeline, but it's easier to separate them. Make sure you have not forgotten the `model.eval()` or `model.train()` at the right places, or the `zero_grad()` at each step, however! </Tip> ## Debugging silent errors during training[[debugging-silent-errors-during-training]] What can we do to debug a training that completes without error but doesn't get good results? 
We'll give you some pointers here, but be aware that this kind of debugging is the hardest part of machine learning, and there is no magical answer. ### Check your data (again!)[[check-your-data-again]] Your model will only learn something if it's actually possible to learn anything from your data. If there is a bug that corrupts the data or the labels are attributed randomly, it's very likely you won't get any model training on your dataset. So always start by double-checking your decoded inputs and labels, and ask yourself the following questions: - Is the decoded data understandable? - Do you agree with the labels? - Is there one label that's more common than the others? - What should the loss/metric be if the model predicted a random answer/always the same answer? <Tip warning={true}> ⚠️ If you are doing distributed training, print samples of your dataset in each process and triple-check that you get the same thing. One common bug is to have some source of randomness in the data creation that makes each process have a different version of the dataset. </Tip> After looking at your data, go through a few of the model's predictions and decode them too. If the model is always predicting the same thing, it might be because your dataset is biased toward one category (for classification problems); techniques like oversampling rare classes might help. If the loss/metric you get on your initial model is very different from the loss/metric you would expect for random predictions, double-check the way your loss or metric is computed, as there is probably a bug there. If you are using several losses that you add at the end, make sure they are of the same scale. When you are sure your data is perfect, you can see if the model is capable of training on it with one simple test. ### Overfit your model on one batch[[overfit-your-model-on-one-batch]] Overfitting is usually something we try to avoid when training, as it means the model is not learning to recognize the general features we want it to but is instead just memorizing the training samples. However, trying to train your model on one batch over and over again is a good test to check if the problem as you framed it can be solved by the model you are attempting to train. It will also help you see if your initial learning rate is too high. Doing this once you have defined your `Trainer` is really easy; just grab a batch of training data, then run a small manual training loop only using that batch for something like 20 steps: ```py for batch in trainer.get_train_dataloader(): break batch = {k: v.to(device) for k, v in batch.items()} trainer.create_optimizer() for _ in range(20): outputs = trainer.model(**batch) loss = outputs.loss loss.backward() trainer.optimizer.step() trainer.optimizer.zero_grad() ``` <Tip> 💡 If your training data is unbalanced, make sure to build a batch of training data containing all the labels. </Tip> The resulting model should have close-to-perfect results on the same `batch`. Let's compute the metric on the resulting predictions: ```py with torch.no_grad(): outputs = trainer.model(**batch) preds = outputs.logits labels = batch["labels"] compute_metrics((preds.cpu().numpy(), labels.cpu().numpy())) ``` ```python out {'accuracy': 1.0} ``` 100% accuracy, now this is a nice example of overfitting (meaning that if you try your model on any other sentence, it will very likely give you a wrong answer)! 
If you don't manage to have your model obtain perfect results like this, it means there is something wrong with the way you framed the problem or your data, so you should fix that. Only when you manage to pass the overfitting test can you be sure that your model can actually learn something. <Tip warning={true}> ⚠️ You will have to recreate your model and your `Trainer` after this test, as the model obtained probably won't be able to recover and learn something useful on your full dataset. </Tip> ### Don't tune anything until you have a first baseline[[dont-tune-anything-until-you-have-a-first-baseline]] Hyperparameter tuning is always emphasized as being the hardest part of machine learning, but it's just the last step to help you gain a little bit on the metric. Most of the time, the default hyperparameters of the `Trainer` will work just fine to give you good results, so don't launch into a time-consuming and costly hyperparameter search until you have something that beats the baseline you have on your dataset. Once you have a good enough model, you can start tweaking a bit. Don't try launching a thousand runs with different hyperparameters, but compare a couple of runs with different values for one hyperparameter to get an idea of which has the greatest impact. If you are tweaking the model itself, keep it simple and don't try anything you can't reasonably justify. Always make sure you go back to the overfitting test to verify that your change hasn't had any unintended consequences. ### Ask for help[[ask-for-help]] Hopefully you will have found some advice in this section that helped you solve your issue, but if that's not the case, remember you can always ask the community on the [forums](https://discuss.huggingface.co/). Here are some additional resources that may prove helpful: - ["Reproducibility as a vehicle for engineering best practices"](https://docs.google.com/presentation/d/1yHLPvPhUs2KGI5ZWo0sU-PKU3GimAk3iTsI38Z-B5Gw/edit#slide=id.p) by Joel Grus - ["Checklist for debugging neural networks"](https://towardsdatascience.com/checklist-for-debugging-neural-networks-d8b2a9434f21) by Cecelia Shao - ["How to unit test machine learning code"](https://medium.com/@keeper6928/how-to-unit-test-machine-learning-code-57cf6fd81765) by Chase Roberts - ["A Recipe for Training Neural Networks"](http://karpathy.github.io/2019/04/25/recipe/) by Andrej Karpathy Of course, not every problem you encounter when training neural nets is your own fault! If you encounter something in the 🤗 Transformers or 🤗 Datasets library that does not seem right, you may have encountered a bug. You should definitely tell us all about it, and in the next section we'll explain exactly how to do that.
course/chapters/en/chapter8/4.mdx/0
{ "file_path": "course/chapters/en/chapter8/4.mdx", "repo_id": "course", "token_count": 10164 }
110
<FrameworkSwitchCourse {fw} />

# Putting it all together

{#if fw === 'pt'}

<CourseFloatingBanner chapter={2}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_pt.ipynb"},
    {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_pt.ipynb"},
]} />

{:else}

<CourseFloatingBanner chapter={2}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_tf.ipynb"},
    {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_tf.ipynb"},
]} />

{/if}

In the last few sections, we've been doing our best to do most of the work by hand. We explored how tokenizers work and looked at tokenization, conversion to input IDs, padding, truncation, and attention masks.

However, as we saw in section 3, the 🤗 Transformers API can handle all of this for us with a high-level function that we'll cover here. When you call your `tokenizer` directly on a sentence, you get back inputs that are ready to pass to your model:

```py
from transformers import AutoTokenizer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
```

Here, the `model_inputs` variable contains everything that's necessary for a model to operate well. For DistilBERT, that includes the input IDs as well as the attention mask. Other models that accept additional inputs will also have those output by the `tokenizer` object.

As we'll see in the examples below, this method is very powerful. First, it can tokenize a single sequence:

```py
sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
```

It also handles multiple sequences at a time, with no change in the API:

```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]

model_inputs = tokenizer(sequences)
```

It can pad according to several objectives:

```py
# Pads the sequences up to the maximum sequence length
model_inputs = tokenizer(sequences, padding="longest")

# Pads the sequences up to the model max length
# (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, padding="max_length")

# Pads the sequences up to the specified max length
model_inputs = tokenizer(sequences, padding="max_length", max_length=8)
```

It can also truncate sequences:

```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]

# Truncates the sequences that are longer than the model max length
# (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, truncation=True)

# Truncates the sequences that are longer than the specified max length
model_inputs = tokenizer(sequences, max_length=8, truncation=True)
```

The `tokenizer` object can handle the conversion to specific framework tensors, which can then be sent directly to the model.
For example, in the following code sample we're asking the tokenizer to return tensors from the different frameworks: `"pt"` returns PyTorch tensors, `"tf"` returns TensorFlow tensors, and `"np"` returns NumPy arrays:

```py
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]

# Returns PyTorch tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="pt")

# Returns TensorFlow tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="tf")

# Returns NumPy arrays
model_inputs = tokenizer(sequences, padding=True, return_tensors="np")
```

## Special tokens

If we take a look at the input IDs returned by the tokenizer, we'll see they are a tiny bit different from what we had earlier:

```py
sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
print(model_inputs["input_ids"])

tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
print(ids)
```

```python out
[101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]
[1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]
```

One token ID was added at the beginning, and one at the end. Let's decode the two sequences of IDs above to see what this is about:

```py
print(tokenizer.decode(model_inputs["input_ids"]))
print(tokenizer.decode(ids))
```

```python out
"[CLS] i've been waiting for a huggingface course my whole life. [SEP]"
"i've been waiting for a huggingface course my whole life."
```

The tokenizer added the special word `[CLS]` at the beginning and the special word `[SEP]` at the end. This is because the model was pretrained with those, so to get the same results for inference we need to add them as well. Note that some models don't add special words, or add different ones; models may also add these special words only at the beginning, or only at the end. In any case, the tokenizer knows which ones are expected and will take care of this for you.

## Conclusion: From tokenizer to model

Now that we've seen all the individual steps the `tokenizer` object uses when applied to texts, let's see one final time how it handles multiple sequences (padding!), very long sequences (truncation!), and multiple types of tensors with its main API:

{#if fw === 'pt'}
```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]

tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt")
output = model(**tokens)
```
{:else}
```py
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"]

tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="tf")
output = model(**tokens)
```
{/if}
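As an optional follow-up (not part of the original chapter), and assuming the PyTorch branch above, you can turn the raw logits into probabilities to see what the model actually predicts. The `id2label` attribute used here is the standard mapping stored in the model config:

```py
import torch

predictions = torch.nn.functional.softmax(output.logits, dim=-1)
print(predictions)
print(model.config.id2label)  # maps each class index to its label name
```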
course/chapters/es/chapter2/6.mdx/0
{ "file_path": "course/chapters/es/chapter2/6.mdx", "repo_id": "course", "token_count": 2505 }
111
# 🤗 Datasets, check!

<CourseFloatingBanner chapter={5} classNames="absolute z-10 right-0 top-0" />

Well, that was quite a tour of the 🤗 Datasets library. Congratulations on making it this far! With the knowledge you gained in this chapter, you should be able to:

- Load datasets from anywhere, be it the Hugging Face Hub, your computer, or a remote server at your company.
- Wrangle your data using a combination of the `Dataset.map()` and `Dataset.filter()` functions.
- Quickly switch between data formats like Pandas and NumPy using `Dataset.set_format()`.
- Create your own dataset and push it to the Hugging Face Hub.
- Process your documents using a Transformer model and build a semantic search engine using FAISS.

In [Chapter 7](/course/chapter7) we'll put all of this into practice as we take a deep dive into the NLP tasks that Transformer models are great for. Before moving on, it's time to put your knowledge of 🤗 Datasets to the test with a quiz!
course/chapters/es/chapter5/7.mdx/0
{ "file_path": "course/chapters/es/chapter5/7.mdx", "repo_id": "course", "token_count": 377 }
112
# Introduction

Welcome to the Hugging Face course! This introduction will guide you through setting up a working environment. If you're just starting the course, we recommend you first take a look at [Chapter 1](/course/fr/chapter1), then come back and set up your environment so you can try the code yourself.

All the libraries that we'll be using in this course are available as Python *packages*, so here we'll show you how to set up a Python environment and install the specific libraries you'll need.

We'll cover two ways of setting up your working environment: using a Google Colab *notebook* or using a Python virtual environment. Feel free to choose the one that suits you best. For beginners, we strongly recommend that you get started by using a Google Colab *notebook*.

Note that we will not be covering the Windows system. If you're running Windows, we recommend following along using a Google Colab *notebook*. If you're using a Linux distribution or macOS, you can use either of the two approaches described below.

Most of the course relies on you having a Hugging Face account. If you don't have one, we recommend creating one now: [create an account](https://huggingface.co/join).

## Using a Google Colab <i>notebook</i>

Using a Google Colab *notebook* is the simplest possible setup: start a *notebook* in your browser and get straight to coding!

If you're not familiar with Colab, we recommend you start by following the [introduction](https://colab.research.google.com/notebooks/intro.ipynb). Colab allows you to use hardware like GPUs or TPUs, and it is free for small workloads.

Once you're comfortable enough with Colab, create a new *notebook* and get started with the setup:

<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/>
</div>

The next step is to install the libraries that we'll be using in this course. We'll use `pip` for the installation, which is the package manager for Python. In *notebooks*, you can run system commands by preceding them with the `!` character, so you can install the 🤗 *Transformers* library as follows:

```
!pip install transformers
```

You can make sure the package was correctly installed by importing it within your Python runtime:

```
import transformers
```

<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/>
</div>

This installs a very light version of 🤗 *Transformers*. In particular, no specific machine learning *framework* (like PyTorch or TensorFlow) is installed.
Since we'll be using lots of different features of the library, we recommend installing the development version, which comes with all the required dependencies for pretty much any imaginable use case:

```
!pip install transformers[sentencepiece]
```

This will take a bit of time, but then you'll be ready to go for the rest of the course!

## Using a Python virtual environment

If you prefer to use a Python virtual environment, the first step is to install Python on your system. We recommend following [this guide](https://realpython.com/installing-python/) to get started.

Once you have Python installed, you should be able to run Python commands in your terminal. You can start by running the following command to make sure it is correctly installed before proceeding to the next steps: `python --version`. This should print out the Python version available on your system.

When running a Python command in your terminal, such as `python --version`, you should think of the program running your command as the "main" Python on your system. We recommend keeping this main installation free of any *packages*, and using it to create separate environments for each application you work on. This way, each application can have its own dependencies and *packages*, and you won't need to worry about potential compatibility issues with other applications.

In Python this is done with [*virtual environments*](https://docs.python.org/3/tutorial/venv.html), which are self-contained directory trees that each contain a Python installation with a particular Python version alongside all the *packages* the application needs. Creating such a virtual environment can be done with a number of different tools, but we'll use the official Python *package* for that purpose: [`venv`](https://docs.python.org/3/library/venv.html#module-venv).

First, create the directory you'd like your application to live in. For example, you might want to make a new directory called *transformers-course* at the root of your home directory:

```
mkdir ~/transformers-course
cd ~/transformers-course
```

From inside this directory, create a virtual environment using the Python `venv` module:

```
python -m venv .env
```

You should now have a directory called *.env* in your otherwise empty folder:

```
ls -a
```

```out
.      ..    .env
```

You can jump in and out of your virtual environment with the `activate` and `deactivate` scripts:

```
# Activate the virtual environment
source .env/bin/activate

# Deactivate the virtual environment
source .env/bin/deactivate
```

You can make sure that the environment is activated by running the `which python` command: if it points to the virtual environment, then you have successfully activated it!

```
which python
```

```out
/home/<user>/transformers-course/.env/bin/python
```

### Installing dependencies

As in the previous section on using Google Colab instances, you'll now need to install the *packages* required to continue.
Again, you can install the development version of 🤗 *Transformers* using the `pip` package manager:

```
pip install "transformers[sentencepiece]"
```

You're now all set up and ready to go!
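As an optional sanity check (not part of the original chapter), you can confirm which version of the library ended up in your environment by importing it and printing its version, just as in the Colab section above:

```
import transformers

print(transformers.__version__)
```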
course/chapters/fr/chapter0/1.mdx/0
{ "file_path": "course/chapters/fr/chapter0/1.mdx", "repo_id": "course", "token_count": 2316 }
113
<FrameworkSwitchCourse {fw} />

# Putting it all together

{#if fw === 'pt'}

<CourseFloatingBanner chapter={2}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "English", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_pt.ipynb"},
    {label: "Français", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/fr/chapter2/section6_pt.ipynb"},
    {label: "English", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_pt.ipynb"},
    {label: "Français", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/fr/chapter2/section6_pt.ipynb"},
]} />

{:else}

<CourseFloatingBanner chapter={2}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "English", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_tf.ipynb"},
    {label: "Français", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/fr/chapter2/section6_tf.ipynb"},
    {label: "English", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section6_tf.ipynb"},
    {label: "Français", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/fr/chapter2/section6_tf.ipynb"},
]} />

{/if}

In the last few sections, we've been doing our best to do most of the work by hand. We explored how *tokenizers* work and looked at tokenization, conversion to input IDs, *padding*, truncation, and attention masks.

However, as we saw in section 2, the 🤗 *Transformers* API can handle all of this for us with a high-level function that we'll dive into here. When you call your `tokenizer` directly on the sentence, you get back inputs that are ready to pass through your model:

```py
from transformers import AutoTokenizer

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
```

Here, the `model_inputs` variable contains everything that's necessary for a model to operate well. For DistilBERT, that includes the input IDs as well as the attention mask. Other models that accept additional inputs will also have those provided by the `tokenizer` object.

As we'll see in the examples below, this method is very powerful. First, it can tokenize a single sequence:

```py
sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
```

It also handles multiple sequences at a time, with no change in the API:

```py
sequences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "So have I!",
]

model_inputs = tokenizer(sequences)
```

It can pad according to several objectives:

```py
# Pads the sequences up to the maximum sequence length
model_inputs = tokenizer(sequences, padding="longest")

# Pads the sequences up to the model max length (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, padding="max_length")

# Pads the sequences up to the specified max length
model_inputs = tokenizer(sequences, padding="max_length", max_length=8)
```

It can also truncate sequences:

```py
sequences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "So have I!",
]

# Truncates the sequences that are longer than the model max length
# (512 for BERT or DistilBERT)
model_inputs = tokenizer(sequences, truncation=True)

# Truncates the sequences that are longer than the specified max length
model_inputs = tokenizer(sequences, max_length=8, truncation=True)
```

The `tokenizer` object can handle the conversion to specific *framework* tensors, which can then be sent directly to the model. For example, in the following code sample we're asking the *tokenizer* to return PyTorch tensors when we specify `"pt"`, TensorFlow tensors when we specify `"tf"`, and NumPy arrays when we specify `"np"`:

```py
sequences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "So have I!",
]

# Returns PyTorch tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="pt")

# Returns TensorFlow tensors
model_inputs = tokenizer(sequences, padding=True, return_tensors="tf")

# Returns NumPy arrays
model_inputs = tokenizer(sequences, padding=True, return_tensors="np")
```

## Special tokens

If we take a look at the input IDs returned by the *tokenizer*, we'll see they are a little different from what we had earlier:

```py
sequence = "I've been waiting for a HuggingFace course my whole life."

model_inputs = tokenizer(sequence)
print(model_inputs["input_ids"])

tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
print(ids)
```

```python out
[101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]
[1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]
```

One token ID was added at the beginning, as well as one at the end. Let's decode the two sequences of IDs above to see what this is about:

```py
print(tokenizer.decode(model_inputs["input_ids"]))
print(tokenizer.decode(ids))
```

```python out
"[CLS] i've been waiting for a huggingface course my whole life. [SEP]"
"i've been waiting for a huggingface course my whole life."
```

The *tokenizer* added the special word `[CLS]` at the beginning and the special word `[SEP]` at the end. This is because the model was pretrained with those, so to get the same results for inference we need to add them as well. Note that some models don't add special words, or add different ones; models may also add these special words only at the beginning, or only at the end. In any case, the *tokenizer* knows which ones are expected and will take care of this for you.
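If you ever want to see the effect of these special tokens yourself (an optional check, not in the original text), the tokenizer call accepts an `add_special_tokens` flag that disables them:

```py
model_inputs = tokenizer(sequence, add_special_tokens=False)
print(model_inputs["input_ids"])
print(tokenizer.decode(model_inputs["input_ids"]))
```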
## Conclusion : du <i>tokenizer</i> au modèle Maintenant que nous avons vu toutes les étapes individuelles que l'objet `tokenizer` utilise lorsqu'il est appliqué sur des textes, voyons une dernière fois comment il peut gérer plusieurs séquences (*padding*), de très longues séquences (*troncation*) et plusieurs types de tenseurs avec son API principale : {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequences = [ "I've been waiting for a HuggingFace course my whole life.", "So have I!", ] # « J'ai attendu un cours de HuggingFace toute ma vie. », « Moi aussi ! » tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt") output = model(**tokens) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequences = [ "I've been waiting for a HuggingFace course my whole life.", "So have I!", ] # « J'ai attendu un cours de HuggingFace toute ma vie. », « Moi aussi ! » tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="tf") output = model(**tokens) ``` {/if}
course/chapters/fr/chapter2/6.mdx/0
{ "file_path": "course/chapters/fr/chapter2/6.mdx", "repo_id": "course", "token_count": 3124 }
114
# Introduction <CourseFloatingBanner chapter={5} classNames="absolute z-10 right-0 top-0" /> Dans le [chapitre 3](/course/fr/chapter3) vous avez eu un premier aperçu de la bibliothèque 🤗 *Datasets* et des trois étapes principales pour *finetuner* un modèle : 1. chargement d'un jeu de données à partir du *Hub* d’Hugging Face, 2. prétraitement des données avec `Dataset.map()`, 3. chargement et calcul des métriques. Mais ce n'est qu'effleurer la surface de ce que 🤗 *Datasets* peut faire ! Dans ce chapitre, nous allons plonger profondément dans cette bibliothèque. En cours de route, nous trouverons des réponses aux questions suivantes : * que faire lorsque votre jeu de données n'est pas sur le *Hub* ? * comment découper et trancher un jeu de données ? (Et si on a _vraiment_ besoin d'utiliser Pandas ?) * que faire lorsque votre jeu de données est énorme et va monopoliser la RAM de votre ordinateur portable ? * qu'est-ce que c'est que le « *memory mapping* » et Apache Arrow ? * comment créer votre propre jeu de données et le pousser sur le *Hub* ? Les techniques apprises dans ce chapitre vous prépareront aux tâches avancées de tokenisation du [chapitre 6](/course/fr/chapter6) et de *finetuning* du [chapitre 7](/course/fr/chapter7). Alors prenez un café et commençons !
course/chapters/fr/chapter5/1.mdx/0
{ "file_path": "course/chapters/fr/chapter5/1.mdx", "repo_id": "course", "token_count": 484 }
115
# Tokenisation <i>Unigram</i>

<CourseFloatingBanner chapter={6}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "English", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter6/section7.ipynb"},
    {label: "Français", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/fr/chapter6/section7.ipynb"},
    {label: "English", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter6/section7.ipynb"},
    {label: "Français", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/fr/chapter6/section7.ipynb"},
]} />

L'algorithme *Unigram* est souvent utilisé dans *SentencePiece*, qui est l'algorithme de tokenisation utilisé par des modèles comme ALBERT, T5, mBART, Big Bird et XLNet.

<Youtube id="TGZfZVuF9Yc"/>

<Tip>

💡 Cette section couvre *Unigram* en profondeur, allant jusqu'à montrer une implémentation complète. Vous pouvez passer directement à la fin si vous souhaitez simplement avoir un aperçu général de l'algorithme de tokénisation.

</Tip>

## Algorithme d'entraînement

Comparé au BPE et à *WordPiece*, *Unigram* fonctionne dans l'autre sens : il part d'un grand vocabulaire et enlève des *tokens* jusqu'à atteindre la taille de vocabulaire désirée. Il existe plusieurs options pour construire ce vocabulaire de base. Nous pouvons par exemple prendre les sous-chaînes les plus courantes dans les mots prétokénisés ou appliquer le BPE sur le corpus initial avec une grande taille de vocabulaire.

À chaque étape de l'entraînement, l'algorithme *Unigram* calcule une perte sur le corpus compte tenu du vocabulaire actuel. Ensuite, pour chaque symbole du vocabulaire, l'algorithme calcule de combien la perte globale augmenterait si le symbole était supprimé et recherche les symboles qui l'augmenteraient le moins. Ces symboles ont un effet moindre sur la perte globale du corpus, ils sont donc en quelque sorte « moins nécessaires » et sont les meilleurs candidats à la suppression.

Comme il s'agit d'une opération très coûteuse, nous ne nous contentons pas de supprimer le symbole unique associé à la plus faible augmentation de la perte mais les \\(p\\) pourcent des symboles associés à la plus faible augmentation de la perte. \\(p\\) est un hyperparamètre que vous pouvez contrôler, valant généralement 10 ou 20. Ce processus est ensuite répété jusqu'à ce que le vocabulaire ait atteint la taille souhaitée. Notez que nous ne supprimons jamais les caractères de base, afin de nous assurer que tout mot peut être tokenisé.

Tout ceci peut paraître encore un peu vague. En effet, la partie principale de l'algorithme est de calculer une perte sur le corpus et de voir comment elle change lorsque nous supprimons certains *tokens* du vocabulaire mais nous n'avons pas encore expliqué comment le faire. Cette étape repose sur l'algorithme de tokénisation *Unigram*, nous allons donc l'aborder à présent.

Nous allons réutiliser le corpus des exemples précédents :

```
("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5)
# "câlin", "carlin", "jeu de mots", "brioche", "câlins"...
```

et pour cet exemple, nous prendrons toutes les sous-chaînes strictes pour le vocabulaire initial :

```
["h", "u", "g", "hu", "ug", "p", "pu", "n", "un", "b", "bu", "s", "hug", "gs", "ugs"]
```

## Algorithme de tokenisation

Un modèle *Unigram* est un type de modèle de langage qui considère que chaque *token* est indépendant des *tokens* qui le précèdent.
Il s'agit du modèle de langage le plus simple, dans le sens où la probabilité du *token* X compte tenu du contexte précédent est simplement la probabilité du *token* X. Ainsi, si nous utilisions un modèle de langage *Unigram* pour générer du texte, nous prédirions toujours le *token* le plus courant.

La probabilité d'un *token* donné est sa fréquence (le nombre de fois que nous le trouvons) dans le corpus original, divisée par la somme de toutes les fréquences de tous les *tokens* dans le vocabulaire (pour s'assurer que la somme des probabilités est égale à 1). Par exemple, `"ug"` est présent dans `"hug"`, `"pug"`, et `"hugs"`. Il a donc une fréquence de 20 dans notre corpus.

Voici les fréquences de tous les sous-mots possibles dans le vocabulaire :

```
("h", 15) ("u", 36) ("g", 20) ("hu", 15) ("ug", 20) ("p", 17) ("pu", 17) ("n", 16)
("un", 16) ("b", 4) ("bu", 4) ("s", 5) ("hug", 15) ("gs", 5) ("ugs", 5)
```

Ainsi, la somme de toutes les fréquences est de 210 et la probabilité du sous-mot `"ug"` est donc de 20/210.

<Tip>

✏️ **A votre tour !** Ecrivez le code permettant de calculer les fréquences ci-dessus et vérifiez que les résultats affichés sont corrects, de même que la somme totale.

</Tip>

Maintenant, pour tokeniser un mot donné, nous examinons toutes les segmentations possibles en *tokens* et calculons la probabilité de chacune d'entre elles selon le modèle *Unigram*. Puisque tous les *tokens* sont considérés comme indépendants, cette probabilité est juste le produit de la probabilité de chaque *token*. Par exemple, la tokenisation `["p", "u", "g"]` de `"pug"` a la probabilité :

$$P([``p", ``u", ``g"]) = P(``p") \times P(``u") \times P(``g") = \frac{17}{210} \times \frac{36}{210} \times \frac{20}{210} = 0.001322$$

Comparativement, la tokenisation `["pu", "g"]` a la probabilité :

$$P([``pu", ``g"]) = P(``pu") \times P(``g") = \frac{17}{210} \times \frac{20}{210} = 0.007710$$

donc celle-là est beaucoup plus probable. En général, les tokénisations comportant le moins de *tokens* possible auront la probabilité la plus élevée (en raison de la division par 210 répétée pour chaque *token*), ce qui correspond à ce que nous voulons intuitivement : diviser un mot en un nombre de *tokens* le plus faible possible.

La tokenisation d'un mot avec le modèle *Unigram* est donc la tokenisation avec la plus haute probabilité. Dans l'exemple de `"pug"`, voici les probabilités que nous obtiendrions pour chaque segmentation possible :

```
["p", "u", "g"] : 0.001322
["p", "ug"] : 0.007710
["pu", "g"] : 0.007710
```

Ainsi, `"pug"` sera tokenisé comme `["p", "ug"]` ou `["pu", "g"]`, selon la segmentation rencontrée en premier (notez que dans un corpus plus large, les cas d'égalité comme celui-ci seront rares).

Dans ce cas-ci, cela a été facile de trouver toutes les segmentations possibles et de calculer leurs probabilités, mais en général ce sera un peu plus difficile. Il existe un algorithme classique utilisé pour cela, appelé *algorithme de Viterbi*. Essentiellement, on peut construire un graphe pour détecter les segmentations possibles d'un mot donné en disant qu'il existe une branche du caractère _a_ au caractère _b_ si le sous-mot de _a_ à _b_ est dans le vocabulaire, et attribuer à cette branche la probabilité du sous-mot.

Pour trouver le chemin qui va avoir le meilleur score dans ce graphe, l'algorithme de Viterbi détermine, pour chaque position dans le mot, la segmentation avec le meilleur score qui se termine à cette position.
Puisque nous allons du début à la fin, ce meilleur score peut être trouvé en parcourant en boucle tous les sous-mots se terminant à la position actuelle, puis en utilisant le meilleur score de tokenization de la position à laquelle ce sous-mot commence. Ensuite, il suffit de dérouler le chemin emprunté pour arriver à la fin. Prenons un exemple en utilisant notre vocabulaire et le mot `"unhug"`. Pour chaque position, les sous-mots avec les meilleurs scores se terminant là sont les suivants : ``` Character 0 (u): "u" (score 0.171429) Character 1 (n): "un" (score 0.076191) Character 2 (h): "un" "h" (score 0.005442) Character 3 (u): "un" "hu" (score 0.005442) Character 4 (g): "un" "hug" (score 0.005442) ``` Ainsi, `"unhug"` serait tokenisé comme `["un", "hug"]`. <Tip> ✏️ **A votre tour !** Déterminer la tokenization du mot `"huggun"` et son score. </Tip> ## Retour à l'entraînement Maintenant que nous avons vu comment fonctionne la tokenisation, nous pouvons nous plonger un peu plus profondément dans la perte utilisée pendant l'entraînement. À n'importe quelle étape, cette perte est calculée en tokenisant chaque mot du corpus, en utilisant le vocabulaire courant et le modèle *Unigram* déterminé par les fréquences de chaque *token* dans le corpus (comme vu précédemment). Chaque mot du corpus a un score, et la perte est le négatif du logarithme de ces scores, c'est-à-dire la somme pour tous les mots du corpus de tous les `-log(P(word))`. Revenons à notre exemple avec le corpus suivant : ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` La tokenisation de chaque mot avec leurs scores respectifs est : ``` "hug": ["hug"] (score 0.071428) "pug": ["pu", "g"] (score 0.007710) "pun": ["pu", "n"] (score 0.006168) "bun": ["bu", "n"] (score 0.001451) "hugs": ["hug", "s"] (score 0.001701) ``` Donc la perte est : ``` 10 * (-log(0.071428)) + 5 * (-log(0.007710)) + 12 * (-log(0.006168)) + 4 * (-log(0.001451)) + 5 * (-log(0.001701)) = 169.8 ``` Maintenant, nous devons calculer comment la suppression de chaque token affecte la perte. C'est plutôt fastidieux, donc nous allons le faire pour deux *tokens* ici et garder tout le processus pour quand nous aurons du code pour nous aider. Dans ce cas (très) particulier, nous avions deux tokenizations équivalentes de tous les mots. Par exmeple, comme nous l'avons vu précédemment, `"pug"` pourrait être tokenisé en `["p", "ug"]` avec le même score. Ainsi, enlever le token `"pu"` du vocabulaire donnera exactement la même perte. D'un autre côté, supprimer le mot `"hug"` aggravera la perte, car la tokenisation de `"hug"` et `"hugs"` deviendra : ``` "hug": ["hu", "g"] (score 0.006802) "hugs": ["hu", "gs"] (score 0.001701) ``` Ces changements entraîneront une augmentation de la perte de : ``` - 10 * (-log(0.071428)) + 10 * (-log(0.006802)) = 23.5 ``` Par conséquent, le token `"pu"` sera probablement retiré du vocabulaire, mais pas `"hug"`. ## Implémentation d'<i>Unigram</i> Maintenant, implémentons tout ce que nous avons vu jusqu'à présent dans le code. Comme pour le BPE et *WordPiece*, ce n'est pas une implémentation efficace de l'algorithme *Unigram* (bien au contraire), mais elle devrait vous aider à le comprendre un peu mieux. Nous allons utiliser le même corpus que précédemment comme exemple : ```python corpus = [ "This is the Hugging Face Course.", # C'est le cours d'Hugging Face. "This chapter is about tokenization.", # Ce chapitre traite de la tokenisation. 
"This section shows several tokenizer algorithms.", # Cette section présente plusieurs algorithmes de *tokenizer*. "Hopefully, you will be able to understand how they are trained and generate tokens.", # Avec un peu de chance, vous serez en mesure de comprendre comment ils sont entraînés et génèrent des *tokens*. ] ``` Cette fois, nous allons utiliser `xlnet-base-cased` comme modèle : ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("xlnet-base-cased") ``` Comme pour le BPE et *WordPiece*, nous commençons par compter le nombre d'occurrences de chaque mot dans le corpus : ```python from collections import defaultdict word_freqs = defaultdict(int) for text in corpus: words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text) new_words = [word for word, offset in words_with_offsets] for word in new_words: word_freqs[word] += 1 word_freqs ``` Ensuite, nous devons initialiser notre vocabulaire à une taille plus grande que celle du vocabulaire que nous voudrons à la fin. Nous devons inclure tous les caractères de base (sinon nous ne serons pas en mesure de tokeniser chaque mot), mais pour les sous-chaînes plus grandes, nous ne garderons que les plus communs. AInsi nous les trions par fréquence : ```python char_freqs = defaultdict(int) subwords_freqs = defaultdict(int) for word, freq in word_freqs.items(): for i in range(len(word)): char_freqs[word[i]] += freq # Boucle à travers les sous-mots de longueur au moins égale à 2 for j in range(i + 2, len(word) + 1): subwords_freqs[word[i:j]] += freq # Trier les sous-mots par fréquence sorted_subwords = sorted(subwords_freqs.items(), key=lambda x: x[1], reverse=True) sorted_subwords[:10] ``` ```python out [('▁t', 7), ('is', 5), ('er', 5), ('▁a', 5), ('▁to', 4), ('to', 4), ('en', 4), ('▁T', 3), ('▁Th', 3), ('▁Thi', 3)] ``` Nous regroupons les caractères avec les meilleurs sous-mots pour arriver à un vocabulaire initial de taille 300 : ```python token_freqs = list(char_freqs.items()) + sorted_subwords[: 300 - len(char_freqs)] token_freqs = {token: freq for token, freq in token_freqs} ``` <Tip> 💡 *SentencePiece* utilise un algorithme plus efficace appelé *Enhanced Suffix Array* (ESA) pour créer le vocabulaire initial. </Tip> Ensuite, nous calculons la somme de toutes les fréquences, pour convertir les fréquences en probabilités. Pour notre modèle, nous allons stocker les logarithmes des probabilités, car c'est plus stable numériquement d'additionner des logarithmes que de multiplier des petits nombres. Cela simplifiera aussi le calcul de la perte du modèle : ```python from math import log total_sum = sum([freq for token, freq in token_freqs.items()]) model = {token: -log(freq / total_sum) for token, freq in token_freqs.items()} ``` Maintenant la fonction principale est celle qui tokenise les mots en utilisant l'algorithme de Viterbi. Comme nous l'avons vu précédemment, cet algorithme calcule la meilleure segmentation de chaque sous-chaîne du mot que nous allons stocker dans une variable nommée `best_segmentations`. Nous allons stocker un dictionnaire par position dans le mot (de 0 à sa longueur totale), avec deux clés : l'index du début du dernier *token* dans la meilleure segmentation et le score de la meilleure segmentation. Avec l'index du début du dernier *token*, nous serons en mesure de récupérer la segmentation complète une fois que la liste est complètement remplie. 
Le remplissage de la liste se fait à l'aide de deux boucles seulement : la boucle principale passe en revue chaque position de départ et la seconde boucle essaie toutes les sous-chaînes commençant à cette position de départ. Si la sous-chaîne est dans le vocabulaire, nous avons une nouvelle segmentation du mot jusqu'à cette position finale que nous comparons à ce qui est dans `best_segmentations`. Une fois que la boucle principale est terminée, nous commençons juste à la fin et sautons d'une position de départ à une autre, en enregistrant les *tokens* au fur et à mesure, jusqu'à ce que nous atteignions le début du mot : ```python def encode_word(word, model): best_segmentations = [{"start": 0, "score": 1}] + [ {"start": None, "score": None} for _ in range(len(word)) ] for start_idx in range(len(word)): # Doit être correctement rempli par les étapes précédentes de la boucle best_score_at_start = best_segmentations[start_idx]["score"] for end_idx in range(start_idx + 1, len(word) + 1): token = word[start_idx:end_idx] if token in model and best_score_at_start is not None: score = model[token] + best_score_at_start # Si nous avons trouvé une meilleure segmentation se terminant à end_idx, nous mettons à jour if ( best_segmentations[end_idx]["score"] is None or best_segmentations[end_idx]["score"] > score ): best_segmentations[end_idx] = {"start": start_idx, "score": score} segmentation = best_segmentations[-1] if segmentation["score"] is None: # Nous n'avons pas trouvé de tokenization du mot -> inconnu (<unk>) return ["<unk>"], None score = segmentation["score"] start = segmentation["start"] end = len(word) tokens = [] while start != 0: tokens.insert(0, word[start:end]) next_start = best_segmentations[start]["start"] end = start start = next_start tokens.insert(0, word[start:end]) return tokens, score ``` Nous pouvons déjà essayer notre modèle initial sur quelques mots : ```python print(encode_word("Hopefully", model)) print(encode_word("This", model)) ``` ```python out (['H', 'o', 'p', 'e', 'f', 'u', 'll', 'y'], 41.5157494601402) (['This'], 6.288267030694535) ``` Il est maintenant facile de calculer la perte du modèle sur le corpus ! ```python def compute_loss(model): loss = 0 for word, freq in word_freqs.items(): _, word_loss = encode_word(word, model) loss += freq * word_loss return loss ``` Nous pouvons vérifier que cela fonctionne sur le modèle que nous avons : ```python compute_loss(model) ``` ```python out 413.10377642940875 ``` Le calcul des scores pour chaque *token* n'est pas très difficile non plus. Il suffit de calculer la perte pour les modèles obtenus en supprimant chaque *token* : ```python import copy def compute_scores(model): scores = {} model_loss = compute_loss(model) for token, score in model.items(): # Nous gardons toujours les tokens de longueur 1. if len(token) == 1: continue model_without_token = copy.deepcopy(model) _ = model_without_token.pop(token) scores[token] = compute_loss(model_without_token) - model_loss return scores ``` Nous pouvons l'essayer sur un *token* donné : ```python scores = compute_scores(model) print(scores["ll"]) print(scores["his"]) ``` Puisque `"ll"` est utilisé dans la tokenisation de `"Hopefully"`, et que le supprimer nous fera probablement utiliser le token `"l"` deux fois à la place, nous nous attendons à ce qu'il ait une perte positive. `"his"` n'est utilisé qu'à l'intérieur du mot `"This"`, qui est tokenisé comme lui-même, donc nous nous attendons à ce qu'il ait une perte nulle. 
Voici les résultats : ```python out 6.376412403623874 0.0 ``` <Tip> 💡 Cette approche est très inefficace, c'est pourquoi *SentencePiece* utilise une approximation de la perte du modèle sans le *token* X. Au lieu de partir de zéro, il remplace simplement le *token* X par sa segmentation dans le vocabulaire restant. De cette façon, tous les scores peuvent être calculés en une seule fois, en même temps que la perte du modèle. </Tip> Une fois tout cela en place, la dernière chose à faire est d'ajouter les *tokens* spéciaux utilisés par le modèle au vocabulaire, puis de boucler jusqu'à ce que nous ayons élagué suffisamment de *tokens* du vocabulaire pour atteindre la taille souhaitée : ```python percent_to_remove = 0.1 while len(model) > 100: scores = compute_scores(model) sorted_scores = sorted(scores.items(), key=lambda x: x[1]) # Supprime les tokens percent_to_remove ayant les scores les plus bas for i in range(int(len(model) * percent_to_remove)): _ = token_freqs.pop(sorted_scores[i][0]) total_sum = sum([freq for token, freq in token_freqs.items()]) model = {token: -log(freq / total_sum) for token, freq in token_freqs.items()} ``` Ensuite, pour tokeniser un texte, il suffit d'appliquer la prétokénisation et d'utiliser la fonction `encode_word()` : ```python def tokenize(text, model): words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text) pre_tokenized_text = [word for word, offset in words_with_offsets] encoded_words = [encode_word(word, model)[0] for word in pre_tokenized_text] return sum(encoded_words, []) tokenize("This is the Hugging Face course.", model) ``` ```python out ['▁This', '▁is', '▁the', '▁Hugging', '▁Face', '▁', 'c', 'ou', 'r', 's', 'e', '.'] ``` C'est tout pour *Unigram* ! Avec un peu de chance, vous vous sentez à présent être un expert des *tokenizers*. Dans la prochaine section, nous allons nous plonger dans les blocs de construction de la bibliothèque 🤗 *Tokenizers* et allons vous montrer comment vous pouvez les utiliser pour construire votre propre *tokenizer*.
course/chapters/fr/chapter6/7.mdx/0
{ "file_path": "course/chapters/fr/chapter6/7.mdx", "repo_id": "course", "token_count": 7734 }
116
<FrameworkSwitchCourse {fw} />

# Débogage du pipeline d'entraînement

<CourseFloatingBanner chapter={8}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "English", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter8/section4_tf.ipynb"},
    {label: "Français", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/fr/chapter8/section4_tf.ipynb"},
    {label: "English", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter8/section4_tf.ipynb"},
    {label: "Français", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/fr/chapter8/section4_tf.ipynb"},
]} />

Vous avez écrit un magnifique script pour entraîner ou *finetuner* un modèle sur une tâche donnée en suivant consciencieusement les conseils du [chapitre 7](/course/fr/chapter7). Mais lorsque vous lancez la commande `model.fit()`, quelque chose d'horrible se produit : vous obtenez une erreur 😱 ! Ou pire, tout semble aller bien et l'entraînement se déroule sans erreur mais le modèle résultant est mauvais. Dans cette section, nous allons vous montrer ce que vous pouvez faire pour déboguer ce genre de problèmes.

## Déboguer le pipeline d'entraînement

<Youtube id="N9kO52itd0Q"/>

Le problème lorsque vous rencontrez une erreur dans `model.fit()` est qu'elle peut provenir de plusieurs sources, car l'entraînement rassemble généralement beaucoup d'éléments préparés en amont. Les jeux de données sont convertis en `tf.data.Dataset`, donc le problème pourrait être quelque chose d'erroné dans votre jeu de données, ou un problème en essayant de regrouper les éléments des jeux de données ensemble en batchs. Ensuite, Keras prend un batch de données et le transmet au modèle, le problème peut donc se situer dans le code du modèle. Après cela, les gradients sont calculés et l'étape d'optimisation est effectuée, le problème peut donc également se situer dans votre optimiseur. Et même si tout se passe bien pendant l'entraînement, quelque chose peut encore mal tourner pendant l'évaluation si votre métrique pose problème.

La meilleure façon de déboguer une erreur qui survient dans `model.fit()` est de passer manuellement en revue tout le pipeline pour voir où les choses se sont mal passées. L'erreur est alors souvent très facile à résoudre.
Pour le démontrer, nous utiliserons le script suivant qui tente de *finetuner* un modèle DistilBERT sur le [jeu de données MNLI](https://huggingface.co/datasets/glue) : ```py from datasets import load_dataset import evaluate from transformers import ( AutoTokenizer, TFAutoModelForSequenceClassification, ) raw_datasets = load_dataset("glue", "mnli") model_checkpoint = "distilbert-base-uncased" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) def preprocess_function(examples): return tokenizer(examples["premise"], examples["hypothesis"], truncation=True) tokenized_datasets = raw_datasets.map(preprocess_function, batched=True) train_dataset = tokenized_datasets["train"].to_tf_dataset( columns=["input_ids", "labels"], batch_size=16, shuffle=True ) validation_dataset = tokenized_datasets["validation_matched"].to_tf_dataset( columns=["input_ids", "labels"], batch_size=16, shuffle=True ) model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint) model.compile(loss="sparse_categorical_crossentropy", optimizer="adam") model.fit(train_dataset) ``` Si vous essayez de l'exécuter, il se peut que vous obteniez des `VisibleDeprecationWarning`s lors de la conversion du jeu de données. Il s'agit d'un problème UX connu par l'équipe d'Hugging Face, donc veuillez l'ignorer. Si vous lisez le cours après novembre 2021 et que cela se produit encore, envoyez des tweets de rage à @carrigmat jusqu'à ce qu'il le corrige. Le problème cependant est que nous avons une erreur flagrante. Et c'est vraiment, terriblement long : ```python out ValueError: No gradients provided for any variable: ['tf_distil_bert_for_sequence_classification/distilbert/embeddings/word_embeddings/weight:0', '...'] ``` Qu'est-ce que cela signifie ? Nous avons essayé d'entraîner sur nos données mais nous n'avons pas obtenu de gradient. C'est assez déconcertant. Comment commencer à déboguer quelque chose comme ça ? Lorsque l'erreur que vous obtenez ne suggère pas immédiatement l'origine du problème, la meilleure solution consiste souvent à procéder par étapes, en s'assurant à chaque fois que tout semble correct. Et bien sûr, il faut toujours commencer par... ### Vérifier vos données Cela va sans dire, mais si vos données sont corrompues, Keras ne sera pas en mesure de les réparer pour vous. Avant toute chose, vous devez donc jeter un coup d'œil à ce que contient votre ensemble d'entraînement. Bien qu'il soit tentant de regarder dans `raw_datasets` et `tokenized_datasets`, nous vous recommandons fortement d'aller voir les données au moment où elles vont entrer dans le modèle. Cela signifie lire une sortie du `tf.data.Dataset` que vous avez créé avec la fonction `to_tf_dataset()` ! Alors comment faire ? Les objets `tf.data.Dataset` nous donnent des batchs entiers à la fois et ne supportent pas l'indexation, donc nous ne pouvons pas simplement demander `train_dataset[0]`. Nous pouvons, cependant, lui demander poliment un batch : ```py for batch in train_dataset: break ``` `break` termine la boucle après une itération, donc cela prend le premier batch qui sort de `train_dataset` et l'enregistre comme `batch`. 
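Au passage, si vous préférez une seule ligne, la variante suivante donne le même résultat (un `tf.data.Dataset` est itérable) :

```py
batch = next(iter(train_dataset))
```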
Maintenant, jetons un coup d'oeil à ce qu'il y a à l'intérieur : ```python out {'attention_mask': <tf.Tensor: shape=(16, 76), dtype=int64, numpy= array([[1, 1, 1, ..., 0, 0, 0], [1, 1, 1, ..., 0, 0, 0], [1, 1, 1, ..., 0, 0, 0], ..., [1, 1, 1, ..., 1, 1, 1], [1, 1, 1, ..., 0, 0, 0], [1, 1, 1, ..., 0, 0, 0]])>, 'label': <tf.Tensor: shape=(16,), dtype=int64, numpy=array([0, 2, 1, 2, 1, 1, 2, 0, 0, 0, 1, 0, 1, 2, 2, 1])>, 'input_ids': <tf.Tensor: shape=(16, 76), dtype=int64, numpy= array([[ 101, 2174, 1010, ..., 0, 0, 0], [ 101, 3174, 2420, ..., 0, 0, 0], [ 101, 2044, 2048, ..., 0, 0, 0], ..., [ 101, 3398, 3398, ..., 2051, 2894, 102], [ 101, 1996, 4124, ..., 0, 0, 0], [ 101, 1999, 2070, ..., 0, 0, 0]])>} ``` Cela semble correct. Nous passons les `labels`, `attention_mask`, et `input_ids` au modèle, ce qui devrait être tout ce dont il a besoin pour calculer les sorties et la perte. Alors pourquoi n'avons-nous pas de gradient ? Regardez de plus près : nous passons un seul dictionnaire en entrée mais un batch d'entraînement est généralement un tenseur ou un dictionnaire d'entrée, plus un tenseur d'étiquettes. Nos étiquettes sont juste une clé dans notre dictionnaire d'entrée. Est-ce un problème ? Pas toujours, en fait ! Mais c'est l'un des problèmes les plus courants que vous rencontrerez lorsque vous entraînerez des *transformers* avec TensorFlow. Nos modèles peuvent tous calculer la perte en interne, mais pour ce faire, les étiquettes doivent être transmises dans le dictionnaire d'entrée. C'est la perte qui est utilisée lorsque nous ne spécifions pas de valeur de perte à `compile()`. Keras, d'autre part, s'attend généralement à ce que les étiquettes soient passées séparément du dictionnaire d'entrée, et les calculs de perte échoueront généralement si vous ne le faites pas. Le problème est maintenant devenu plus clair : nous avons passé un argument `loss`, ce qui signifie que nous demandons à Keras de calculer les pertes pour nous, mais nous avons passé nos étiquettes comme entrées au modèle, et non comme étiquettes à l'endroit où Keras les attend ! Nous devons choisir l'un ou l'autre : soit nous utilisons la perte interne du modèle et gardons les étiquettes où elles sont, soit nous continuons à utiliser les pertes de Keras, mais nous déplaçons les étiquettes à l'endroit où Keras les attend. Pour simplifier, prenons la première approche. Changez l'appel à `compile()` pour lire : ```py model.compile(optimizer="adam") ``` Maintenant, nous allons utiliser la perte interne du modèle et ce problème devrait être résolu ! <Tip> ✏️ *A votre tour !* Comme défi optionnel après avoir résolu les autres problèmes, vous pouvez essayer de revenir à cette étape et faire fonctionner le modèle avec la perte originale calculée par Keras au lieu de la perte interne. Vous devrez ajouter `"labels"` à l'argument `label_cols` de `to_tf_dataset()` pour vous assurer que les labels sont correctement sortis, ce qui vous donnera des gradients. Mais il y a un autre problème avec la perte que nous avons spécifiée. L'entraînement fonctionnera toujours avec ce problème mais l'apprentissage sera très lent et se stabilisera à une perte d'entraînement élevée. Pouvez-vous trouver ce que c'est ? Un indice codé en ROT13, si vous êtes coincé : Vs lbh ybbx ng gur bhgchgf bs FrdhraprPynffvsvpngvba zbqryf va Genafsbezref, gurve svefg bhgchg vf `ybtvgf`. Jung ner ybtvgf ? Et un deuxième indice : Jura lbh fcrpvsl bcgvzvmref, npgvingvbaf be ybffrf jvgu fgevatf, Xrenf frgf ny gur nethzrag inyhrf gb gurve qrsnhygf. 
Jung nethzragf qbrf FcnefrPngrtbevpnyPebffragebcl unir, naq jung ner gurve qrsnhygf ? </Tip> Maintenant, essayons d'entraîner. Nous devrions obtenir des gradients maintenant, donc avec un peu de chance nous pouvons juste appeler `model.fit()` et tout fonctionnera bien ! ```python out 246/24543 [..............................] - ETA: 15:52 - loss: nan ``` Oh non. `nan` n'est pas une valeur de perte très encourageante. Pourtant, nous avons vérifié nos données et elles semblent plutôt bonnes. Si ce n'est pas le problème, quelle est la prochaine étape ? La prochaine étape évidente est de... ### Vérifier votre modèle `model.fit()` est une fonction très pratique dans Keras, mais elle fait beaucoup de choses pour vous. Cela peut rendre plus difficile de trouver exactement où un problème est survenu. Si vous déboguez votre modèle, une stratégie qui peut vraiment vous aider est de passer un seul batch au modèle et d'examiner les sorties de ce batch en détail. Une autre astuce vraiment utile est de `compiler()` le modèle avec `run_eagerly=True`. Cela le rendra beaucoup plus lent mais les messages d'erreur seront beaucoup plus compréhensibles car ils indiqueront exactement où le problème est survenu dans le code de votre modèle. Pour l'instant, cependant, nous n'avons pas besoin de `run_eagerly`. Exécutons le `batch` que nous avons obtenu précédemment à travers le modèle et voyons à quoi ressemblent les résultats : ```py model(batch) ``` ```python out TFSequenceClassifierOutput(loss=<tf.Tensor: shape=(16,), dtype=float32, numpy= array([nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan, nan], dtype=float32)>, logits=<tf.Tensor: shape=(16, 2), dtype=float32, numpy= array([[nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan], [nan, nan]], dtype=float32)>, hidden_states=None, attentions=None) ``` Eh bien, c'est délicat. Tout est "nan" ! Mais c'est étrange, n'est-ce pas ? Comment tous nos logits pourraient-ils devenir `nan` ? "NAN" signifie "*not a number*". Les valeurs `nan` apparaissent souvent quand on effectue une opération interdite comme la division par zéro. Mais une chose très importante à savoir sur `nan` en apprentissage automatique est que cette valeur a tendance à *se propager*. Si vous multipliez un nombre par `nan`, le résultat sera également `nan`. Et si vous obtenez une valeur `nan` n'importe où dans votre sortie, votre perte ou votre gradient, alors elle se propagera rapidement à travers tout votre modèle. Ceci parce que lorsque cette valeur `nan` est propagée à travers votre réseau, vous obtiendrez des gradients `nan`, et lorsque les mises à jour des poids sont calculées avec ces gradients, vous obtiendrez des poids `nan`, et ces poids calculeront encore plus de sorties `nan` ! Très vite, le réseau entier ne sera plus qu'un gros bloc de `nan`. Une fois que cela arrive, il est assez difficile de voir où le problème a commencé. Comment peut-on isoler l'endroit où les `nan` se sont introduits en premier ? La réponse est d'essayer de *reinitialiser* notre modèle. Une fois que nous avons commencé l'entraînement, nous avons eu un `nan` quelque part et il s'est rapidement propagé à travers tout le modèle. 
Donc, chargeons le modèle à partir d'un checkpoint et ne faisons aucune mise à jour de poids, et voyons où nous obtenons une valeur `nan` : ```py model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint) model(batch) ``` Quand on fait ça, on obtient : ```py out TFSequenceClassifierOutput(loss=<tf.Tensor: shape=(16,), dtype=float32, numpy= array([0.6844486 , nan, nan, 0.67127866, 0.7068601 , nan, 0.69309855, nan, 0.65531296, nan, nan, nan, 0.675402 , nan, nan, 0.69831556], dtype=float32)>, logits=<tf.Tensor: shape=(16, 2), dtype=float32, numpy= array([[-0.04761693, -0.06509043], [-0.0481936 , -0.04556257], [-0.0040929 , -0.05848458], [-0.02417453, -0.0684005 ], [-0.02517801, -0.05241832], [-0.04514256, -0.0757378 ], [-0.02656011, -0.02646275], [ 0.00766164, -0.04350497], [ 0.02060014, -0.05655622], [-0.02615328, -0.0447021 ], [-0.05119278, -0.06928903], [-0.02859691, -0.04879177], [-0.02210129, -0.05791225], [-0.02363213, -0.05962167], [-0.05352269, -0.0481673 ], [-0.08141848, -0.07110836]], dtype=float32)>, hidden_states=None, attentions=None) ``` *Maintenant* on arrive à quelque chose ! Il n'y a pas de valeurs `nan` dans nos logits, ce qui est rassurant. Mais nous voyons quelques valeurs `nan` dans notre perte ! Y a-t-il quelque chose dans ces échantillons en particulier qui cause ce problème ? Voyons de quels échantillons il s'agit (notez que si vous exécutez ce code vous-même, vous pouvez obtenir des indices différents parce que le jeu de données a été mélangé) : ```python import numpy as np loss = model(batch).loss.numpy() indices = np.flatnonzero(np.isnan(loss)) indices ``` ```python out array([ 1, 2, 5, 7, 9, 10, 11, 13, 14]) ``` Examinons les échantillons d'où proviennent ces indices : ```python input_ids = batch["input_ids"].numpy() input_ids[indices] ``` ```python out array([[ 101, 2007, 2032, 2001, 1037, 16480, 3917, 2594, 4135, 23212, 3070, 2214, 10170, 1010, 2012, 4356, 1997, 3183, 6838, 12953, 2039, 2000, 1996, 6147, 1997, 2010, 2606, 1012, 102, 6838, 2001, 3294, 6625, 3773, 1996, 2214, 2158, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 1998, 6814, 2016, 2234, 2461, 2153, 1998, 13322, 2009, 1012, 102, 2045, 1005, 1055, 2053, 3382, 2008, 2016, 1005, 2222, 3046, 8103, 2075, 2009, 2153, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 1998, 2007, 1996, 3712, 4634, 1010, 2057, 8108, 2025, 3404, 2028, 1012, 1996, 2616, 18449, 2125, 1999, 1037, 9666, 1997, 4100, 8663, 11020, 6313, 2791, 1998, 2431, 1011, 4301, 1012, 102, 2028, 1005, 1055, 5177, 2110, 1998, 3977, 2000, 2832, 2106, 2025, 2689, 2104, 2122, 6214, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 1045, 2001, 1999, 1037, 13090, 5948, 2007, 2048, 2308, 2006, 2026, 5001, 2043, 2026, 2171, 2001, 2170, 1012, 102, 1045, 2001, 3564, 1999, 2277, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 2195, 4279, 2191, 2039, 1996, 2181, 2124, 2004, 1996, 2225, 7363, 1012, 102, 2045, 2003, 2069, 2028, 2451, 1999, 1996, 2225, 7363, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 2061, 2008, 1045, 2123, 1005, 1056, 2113, 2065, 2009, 2428, 10654, 7347, 2030, 2009, 7126, 2256, 2495, 2291, 102, 2009, 2003, 5094, 2256, 2495, 2291, 2035, 2105, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 2051, 1010, 2029, 3216, 2019, 2503, 3444, 1010, 6732, 1996, 2265, 2038, 19840, 2098, 2125, 9906, 1998, 2003, 2770, 2041, 1997, 4784, 1012, 102, 2051, 6732, 1996, 2265, 2003, 9525, 1998, 4569, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 1996, 10556, 2140, 11515, 2058, 1010, 2010, 2162, 2252, 5689, 2013, 2010, 7223, 1012, 102, 2043, 1996, 10556, 2140, 11515, 2058, 1010, 2010, 2252, 3062, 2000, 1996, 2598, 1012, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [ 101, 13543, 1999, 2049, 6143, 2933, 2443, 102, 2025, 13543, 1999, 6143, 2933, 2003, 2443, 102, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) ``` Il y a beaucoup de batchs ici mais rien d'inhabituel. Regardons les étiquettes : ```python out labels = batch['labels'].numpy() labels[indices] ``` ```python out array([2, 2, 2, 2, 2, 2, 2, 2, 2]) ``` Ah ! Les échantillons `nan` ont tous le même label. C'est un gros indice. Le fait que nous n'obtenions une perte de `nan` que lorsque notre étiquette vaut 2 suggère que c'est un très bon moment pour vérifier le nombre d'étiquettes dans notre modèle : ```python model.config.num_labels ``` ```python out 2 ``` Nous voyons maintenant le problème : le modèle pense qu'il n'y a que deux classes, mais les étiquettes vont jusqu'à 2, ce qui signifie qu'il y a en fait trois classes (car 0 est aussi une classe). C'est ainsi que nous avons obtenu un `nan`. En essayant de calculer la perte pour une classe inexistante ! Essayons de changer cela et de réajuster le modèle : ``` model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=3) model.compile(optimizer='adam') model.fit(train_dataset) ``` ```python out 869/24543 [>.............................] - ETA: 15:29 - loss: 1.1032 ``` On entraîne ! Plus de `nan` et nos pertes diminuent... en quelque sorte. Si vous regardez pendant un certain temps, vous pouvez commencer à vous impatienter car la valeur des pertes reste obstinément élevée. Arrêtons l'entraînement ici et essayons de réfléchir à ce qui pourrait causer ce problème. À ce stade, nous sommes pratiquement sûrs que les données et le modèle sont corrects, mais notre modèle n'apprend pas bien. Que reste-t-il d'autre ? Il est temps de... ### Vérifier les hyperparamètres Si vous regardez le code ci-dessus, vous ne verrez peut-être aucun hyperparamètre, sauf peut-être le `batch_size` qui ne semble pas être un coupable probable. Cependant, ne soyez pas dupe, il y a toujours des hyperparamètres. Si vous ne pouvez pas les voir, cela signifie simplement que vous ne connaissez pas leur réglage. 
En particulier, souvenez-vous d'une chose essentielle à propos de Keras : si vous définissez une fonction de perte, d'optimisation ou d'activation avec une chaîne, _tous ses arguments seront définis sur leurs valeurs par défaut_. Cela signifie que, même si l'utilisation de chaînes de caractères est très pratique, vous devez être très prudent car cela peut facilement vous cacher des éléments critiques. (Toute personne essayant le défi optionnel ci-dessus devrait prendre bonne note de ce fait). Dans ce cas, où avons-nous défini un argument avec une chaîne de caractères ? Au départ, nous définissions la perte avec une chaîne de caractères, mais nous ne le faisons plus. Cependant, nous le faisons pour l'optimiseur. Cela pourrait-il nous cacher quelque chose ? Jetons un coup d'œil à [ses arguments](https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adam). Y a-t-il quelque chose qui ressort ? C'est exact : le taux d'apprentissage ! Lorsque nous indiquons simplement `'adam'` nous allons obtenir le taux d'apprentissage par défaut qui est de 0.001 (ou 1e-3). C'est beaucoup trop élevé pour un *transformer* ! En général, nous recommandons d'essayer des taux d'apprentissage entre 1e-5 et 1e-4 pour vos modèles soit entre 10X et 100X plus petit que la valeur que nous utilisons ici. Cela semble être un problème majeur, alors essayons de le réduire. Pour ce faire, nous devons importer l'objet `optimizer`. Pendant que nous y sommes, réinitialisons le modèle à partir du *checkpoint* au cas où l'entraînement avec un taux d'apprentissage élevé aurait endommagé ses poids : ```python from tensorflow.keras.optimizers import Adam model = TFAutoModelForSequenceClassification.from_pretrained(model_checkpoint) model.compile(optimizer=Adam(5e-5)) ``` <Tip> 💡 Vous pouvez également importer la fonction `create_optimizer()` de 🤗 <i>Transformers</i> qui vous donnera un optimiseur AdamW avec une décroissance du taux des poids correcte ainsi qu'un réchauffement et une décroissance du taux d'apprentissage. Cet optimiseur produira souvent des résultats légèrement meilleurs que ceux que vous obtenez avec l'optimiseur Adam par défaut. </Tip> Maintenant, nous pouvons essayer de *finetuner* le modèle avec le nouveau taux d'apprentissage : ```python model.fit(train_dataset) ``` ```python out 319/24543 [..............................] - ETA: 16:07 - loss: 0.9718 ``` Maintenant notre perte va vraiment aller quelque part ! L'entraînement semble enfin fonctionner. Il y a une leçon à tirer ici : lorsque votre modèle fonctionne mais que la perte ne diminue pas, et que vous êtes sûr que vos données sont correctes, c'est une bonne idée de vérifier les hyperparamètres comme le taux d'apprentissage et le taux de décroissance des poids. Un réglage trop élevé de l'un ou l'autre de ces paramètres risque fort de faire « caler » l'entraînement à une valeur de perte élevée. ## Autres problèmes potentiels Nous avons couvert les problèmes dans le script ci-dessus, mais il existe plusieurs autres erreurs courantes auxquelles vous pouvez être confronté. Jetons un coup d'oeil à une liste (très incomplète). ### Gérer les erreurs de manque de mémoire Le signe révélateur d'un manque de mémoire est une erreur du type "OOM when allocating tensor" (OOM étant l'abréviation de *out of memory*). Il s'agit d'un risque très courant lorsque l'on utilise de grands modèles de langage. Si vous rencontrez ce problème, une bonne stratégie consiste à diviser par deux la taille de votre batch et à réessayer. 
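À titre d'illustration, voici comment on pourrait reconstruire le jeu de données d'entraînement du script ci-dessus avec un batch deux fois plus petit. Simple esquisse : la valeur de 8 n'est qu'un exemple, à ajuster en fonction de la mémoire de votre GPU :

```py
# Reconstruit le tf.data.Dataset avec une taille de batch réduite de moitié
train_dataset = tokenized_datasets["train"].to_tf_dataset(
    columns=["input_ids", "labels"], batch_size=8, shuffle=True
)

model.fit(train_dataset)
```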
Gardez à l'esprit, cependant, que certains modèles sont *très* grands. Par exemple, le modèle GPT-2 complet possède 1,5 Go de paramètres, ce qui signifie que vous aurez besoin de 6 Go de mémoire rien que pour stocker le modèle, et 6 autres Go pour ses gradients ! Entraîner le modèle GPT-2 complet nécessite généralement plus de 20 Go de VRAM, quelle que soit la taille du batch utilisé, ce dont seuls quelques GPUs sont dotés. Des modèles plus légers comme `distilbert-base-cased` sont beaucoup plus faciles à exécuter et s'entraînent aussi beaucoup plus rapidement. <Tip> Dans la prochaine partie du cours, nous examinerons des techniques plus avancées qui peuvent vous aider à réduire votre empreinte mémoire et vous permettre de <i>finetuner</i> les plus grands modèles. </Tip> ### TensorFlow affamé 🦛 Une bizarrerie particulière de TensorFlow dont vous devez être conscient est qu'il s'alloue *toute* la mémoire de votre GPU dès que vous chargez un modèle ou que vous effectuez un entraînement. Puis il divise cette mémoire selon les besoins. Ce comportement est différent de celui d'autres *frameworks*, comme PyTorch, qui alloue la mémoire selon les besoins avec CUDA plutôt que de le faire en interne. L'un des avantages de l'approche de TensorFlow est qu'elle peut souvent donner des erreurs utiles lorsque vous manquez de mémoire et qu'elle peut récupérer de cet état sans planter tout le noyau CUDA. Mais il y a aussi un inconvénient important : si vous exécutez deux processus TensorFlow en même temps alors **vous allez passer un mauvais moment**. Si vous travaillez sur Colab, vous n'avez pas à vous soucier de cela. Si vous travaillez localement, vous devez absolument faire attention. En particulier, sachez que la fermeture d'un onglet de *notebook* n'entraîne pas nécessairement la fermeture de ce *notebook* ! Vous devrez peut-être sélectionner les *notebooks* en cours d'exécution (ceux qui ont une icône verte) et les fermer manuellement dans la liste des répertoires. Tout *notebook* en cours d'exécution qui utilisait TensorFlow peut encore utiliser une grande partie de la mémoire de votre GPU, ce qui signifie que tout nouveau *notebook* que vous démarrez peut rencontrer des problèmes très étranges. Si vous commencez à obtenir des erreurs concernant CUDA, BLAS ou cuBLAS dans du code qui fonctionnait auparavant, c'est très souvent le coupable. Vous pouvez utiliser une commande comme `nvidia-smi` pour vérifier si la plupart de votre mémoire est libre ou toujours utilisée. Si elle est toujours utilisée, c'est que quelque chose d'autre s'y accroche ! ### Vérifiez vos données (encore !) Votre modèle n'apprendra quelque chose que s'il est réellement possible d'apprendre quelque chose de vos données. S'il y a un *bug* qui corrompt les données ou si les étiquettes sont attribuées de manière aléatoire, il est très probable que vous n'obtiendrez aucun entraînement de modèle sur votre jeu de données. Un outil utile ici est `tokenizer.decode()`. Il transformera les `input_ids` en chaînes de caractères, afin que vous puissiez visualiser les données et voir si vos données d'entraînement renseignent ce que vous voulez. 
Par exemple, après avoir obtenu un `batch` de votre `tf.data.Dataset` comme nous l'avons fait ci-dessus, vous pouvez décoder le premier élément comme suit : ```py input_ids = batch["input_ids"].numpy() tokenizer.decode(input_ids[0]) ``` Vous pouvez ensuite la comparer avec la première étiquette, comme suit : ```py labels = batch["labels"].numpy() label = labels[0] ``` Une fois que vous pouvez visualiser vos données de cette manière, vous pouvez vous poser les questions suivantes : - les données décodées sont-elles compréhensibles ? - êtes-vous d'accord avec les étiquettes ? - y a-t-il une étiquette qui est plus courante que les autres ? - quelle devrait être la perte/métrique si le modèle prédisait une réponse aléatoire/toujours la même réponse ? Après avoir examiné vos données, examinez quelques-unes des prédictions du modèle. Si votre modèle produit des *tokens*, essayez aussi de les décoder ! Si le modèle prédit toujours la même chose, cela peut être dû au fait que votre jeu de données est biaisé en faveur d'une catégorie (pour les problèmes de classification). Des techniques telles que le suréchantillonnage des classes rares peuvent aider. D'autre part, cela peut également être dû à des problèmes d'entraînement tels que de mauvais réglages des hyperparamètres. Si la perte/la métrique que vous obtenez sur votre modèle initial avant entraînement est très différente de la perte/la métrique à laquelle vous vous attendez pour des prédictions aléatoires, vérifiez la façon dont votre perte ou votre métrique est calculée. Il y a probablement un bug. Si vous utilisez plusieurs pertes que vous ajoutez à la fin, assurez-vous qu'elles sont de la même échelle. Lorsque vous êtes sûr que vos données sont parfaites, vous pouvez voir si le modèle est capable de s'entraîner sur elles grâce à un test simple. ### Surentraînement du modèle sur un seul batch Le surentraînement est généralement une chose que nous essayons d'éviter lors de l'entraînement car cela signifie que le modèle n'apprend pas à reconnaître les caractéristiques générales que nous voulons qu'il reconnaisse et se contente de mémoriser les échantillons d'entraînement. Cependant, essayer d'entraîner votre modèle sur un batch encore et encore est un bon test pour vérifier si le problème tel que vous l'avez formulé peut être résolu par le modèle que vous essayez d'entraîner. Cela vous aidera également à voir si votre taux d'apprentissage initial est trop élevé. Une fois que vous avez défini votre `modèle`, c'est très facile. Il suffit de prendre un batch de données d'entraînement, puis de le traiter comme votre jeu de données entier que vous *finetunez* sur un grand nombre d'époques : ```py for batch in train_dataset: break # Assurez-vous que vous avez exécuté model.compile() et défini votre optimiseur, # et vos pertes/métriques si vous les utilisez. model.fit(batch, epochs=20) ``` <Tip> 💡 Si vos données d'entraînement ne sont pas équilibrées, veillez à créer un batch de données d'entraînement contenant toutes les étiquettes. </Tip> Le modèle résultant devrait avoir des résultats proches de la perfection sur le `batch`, avec une perte diminuant rapidement vers 0 (ou la valeur minimale pour la perte que vous utilisez). Si vous ne parvenez pas à ce que votre modèle obtienne des résultats parfaits comme celui-ci, cela signifie qu'il y a quelque chose qui ne va pas dans la façon dont vous avez formulé le problème ou dans vos données et vous devez donc y remédier. 
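Pour vérifier concrètement ce surapprentissage, on peut par exemple comparer les prédictions du modèle sur ce batch avec les étiquettes. Simple esquisse reprenant les variables définies plus haut (selon la version de 🤗 *Datasets*, la clé des étiquettes peut s'appeler `label` ou `labels`) :

```py
import numpy as np

# Après un surentraînement réussi, les prédictions devraient coller presque parfaitement aux étiquettes
preds = np.argmax(model(batch).logits.numpy(), axis=-1)
print(preds)
print(batch["labels"].numpy())
```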
Ce n'est que lorsque vous parviendrez à passer le test de surentraînement que vous pourrez être sûr que votre modèle peut réellement apprendre quelque chose.

<Tip warning={true}>

⚠️ Vous devrez recréer et recompiler votre modèle après ce test, car le modèle obtenu ne sera probablement pas capable de récupérer et d'apprendre quelque chose d'utile sur votre jeu de données complet.

</Tip>

### Ne réglez rien tant que vous n'avez pas une première ligne de base

Le réglage des hyperparamètres est toujours considéré comme la partie la plus difficile de l'apprentissage automatique mais c'est juste la dernière étape pour vous aider à gagner un peu sur la métrique. La plupart du temps, les hyperparamètres par défaut fonctionneront très bien pour vous donner de bons résultats. Donc ne vous lancez pas dans une recherche d'hyperparamètres longue et coûteuse jusqu'à ce que vous ayez quelque chose qui batte la ligne de base que vous avez sur votre jeu de données.

Une fois que vous avez un modèle suffisamment bon, vous pouvez commencer à le *finetuner* un peu. N'essayez pas de lancer un millier d'exécutions avec différents hyperparamètres mais comparez quelques exécutions avec différentes valeurs pour un hyperparamètre afin de vous faire une idée de celui qui a le plus d'impact. Si vous modifiez le modèle lui-même, restez simple et n'essayez rien que vous ne puissiez raisonnablement justifier. Veillez toujours à revenir au test de surentraînement pour vérifier que votre modification n'a pas eu de conséquences inattendues.

### Demander de l'aide

Nous espérons que vous avez trouvé dans cette section des conseils qui vous ont aidé à résoudre votre problème. Si ce n'est pas le cas, n'oubliez pas que vous pouvez toujours demander de l'aide à la communauté sur le [forum](https://discuss.huggingface.co/).

Voici quelques ressources (en anglais) supplémentaires qui peuvent s'avérer utiles :

- [La reproductibilité comme vecteur des meilleures pratiques d'ingénierie](https://docs.google.com/presentation/d/1yHLPvPhUs2KGI5ZWo0sU-PKU3GimAk3iTsI38Z-B5Gw/edit#slide=id.p) par Joel Grus
- [Liste de contrôle pour le débogage des réseaux de neurones](https://towardsdatascience.com/checklist-for-debugging-neural-networks-d8b2a9434f21) par Cecelia Shao
- [Comment tester unitairement le code d'apprentissage automatique](https://medium.com/@keeper6928/how-to-unit-test-machine-learning-code-57cf6fd81765) par Chase Roberts
- [Une recette pour entraîner les réseaux de neurones](http://karpathy.github.io/2019/04/25/recipe/) par Andrej Karpathy

Bien sûr, tous les problèmes rencontrés lors de l'entraînement ne sont pas forcément de votre faute ! Si vous rencontrez quelque chose dans la bibliothèque 🤗 *Transformers* ou 🤗 *Datasets* qui ne semble pas correct, vous avez peut-être trouvé un *bug*. Vous devez absolument nous en parler pour qu'on puisse le corriger. Dans la section suivante, nous allons vous expliquer exactement comment faire.
course/chapters/fr/chapter8/4_tf.mdx/0
{ "file_path": "course/chapters/fr/chapter8/4_tf.mdx", "repo_id": "course", "token_count": 16242 }
117
# Glossaire | Original | Français | |-----------------------------|--------------------------------- | | Accuracy | Précision | | Backward Pass | Passe arrière | | Batch | *Batch* | | Benchmark | *Benchmark* | | Cache | Cache | | Chapter | Chapitre | | Checkpoint | *Checkpoint* (plus rarement « point de sauvegarde ») | | Colab Notebook | *Notebook* Google Colab | | Colator function | Fonction d'assemblement | | Command | Commande | | Configuration | Configuration | | Course | Cours | | Dataloader | Chargeur de données | | Dependency | Dépendances | | Deployment | Déploiement | | Development | Développement | | Dictionary | Dictionnaire | | Download | Télécharger | | Feature | Variable | | Field | Champ | | Fine-tuning | Finetuning | | Folder | Dossier | | Forward Pass | Passe avant | | Google | *Google* | | Hugging Face | *Hugging Face* | | Inference | Inférence | | Learning rate | Taux d’apprentissage | | Library | Bibliothèque | | Linux | Linux | | Loss function | Fonction de perte/coût | | Loop | Boucle | | macOS | macOS | | Model | Modèle | | Hugging Face Hub | *Hub* d’*Hugging Face* | | Module | Module | | Natural Language Processing | Traitement du langage naturel | | Package | Paquet | | Padding | Rembourrage | | Parameter | Paramètre | | Python | Python | | PyTorch | PyTorch | | Samples | Echantillons | | Scheduler | Planificateur | | Script | Script | | Setup | Installation | | TensorFlow | TensorFlow | | Terminal | Terminal | | Tokenizer | Tokeniseur | | Train | Entraîner | | Transformer | *Transformer* | | Virtual Environment | Environnement virtuel | | Weight decay | Taux de décroissance des poids | | Weights | Poids | | Windows | *Windows* | | Working Environment | Environnement de travail | A noter que les mots anglais non traduits sont indiqués en italique dans le cours. De plus, les abréviations techniques comme API, GPU, TPU, etc. ne sont pas traduites.
course/chapters/fr/glossary/1.mdx/0
{ "file_path": "course/chapters/fr/glossary/1.mdx", "repo_id": "course", "token_count": 3266 }
118
# सारांश <CourseFloatingBanner chapter={1} classNames="absolute z-10 right-0 top-0" /> इस अध्याय में, आपने देखा कि 🤗 ट्रांसफॉर्मर के उच्च-स्तरीय `पाइपलाइन ()` फ़ंक्शन का उपयोग करके विभिन्न प्राकृतिक भाषा प्रसंस्करण कार्यों को कैसे किया जाता है। आपने यह भी देखा कि हब में मॉडलों की खोज और उनका उपयोग कैसे करें, साथ ही सीधे अपने ब्राउज़र में मॉडलों का परीक्षण करने के लिए अनुमान API का उपयोग कैसे करें। हमने चर्चा की कि ट्रांसफॉर्मर मॉडल उच्च स्तर पर कैसे काम करते हैं और ट्रांसफर लर्निंग और फाइन-ट्यूनिंग के महत्व के बारे में बात की। एक महत्वपूर्ण पहलू यह है कि आप पूर्ण आर्किटेक्चर या केवल एन्कोडर या डिकोडर का उपयोग कर सकते हैं, यह इस बात पर निर्भर करता है कि आप किस प्रकार के कार्य को हल करना चाहते हैं। निम्न तालिका इसे सारांशित करती है: | मॉडल | उदाहरण | कार्य | |-----------------|--------------------------------------------|----------------------------------------------------------------------------------| | एनकोडर | ALBERT, BERT, DistilBERT, ELECTRA, RoBERTa | वाक्य वर्गीकरण, नामित इकाई मान्यता, प्रश्न उत्तर निकालने वाला | | डिकोडर | CTRL, GPT, GPT-2, Transformer XL | पाठ निर्माण | | एनकोडर-डिकोडर | BART, T5, Marian, mBART | संक्षिप्तीकरण, अनुवाद, प्रश्न उत्तर बनाना |
course/chapters/hi/chapter1/9.mdx/0
{ "file_path": "course/chapters/hi/chapter1/9.mdx", "repo_id": "course", "token_count": 1640 }
119
<!-- DISABLE-FRONTMATTER-SECTIONS --> # Quiz di fine capitolo <CourseFloatingBanner chapter={1} classNames="absolute z-10 right-0 top-0" /> In questo capitolo abbiamo parlato di molti argomenti! Non preoccuparti se non hai capito tutto nel dettaglio: i prossimi capitoli ti aiuteranno a capire come molte di queste cose funzionano dietro le quinte. Prima di procedere, però, verifichiamo cos'hai imparato in questo capitolo! ### 1. Esplora l'Hub e cerca il checkpoint `roberta-large-mnli`. Quale compito svolge? <Question choices={[ { text: "Riassunto testuale", explain: "Rivisita il link e prova di nuovo: <a href=\"https://huggingface.co/roberta-large-mnli\">roberta-large-mnli page</a>." }, { text: "Classificazione testuale", explain: "Più precisamente, determina se due frasi sono connesse logicamente su tre livelli associati alle etichette 'contradiction', 'neutral' e 'entailment'. Questo compito viene detto anche <em>natural language inference</em>.", correct: true }, { text: "Generazione testuale", explain: "Rivisita il link e prova di nuovo: <a href=\"https://huggingface.co/roberta-large-mnli\">roberta-large-mnli page</a>." } ]} /> ### 2. Cosa restituisce il codice seguente? ```py from transformers import pipeline ner = pipeline("ner", grouped_entities=True) ner("My name is Sylvain and I work at Hugging Face in Brooklyn.") ``` <Question choices={[ { text: "Restituisce un punteggio associato alla frase, con etichette del tipo \"positive\" o \"negative\".", explain: "Sbagliato! Se così fosse, si tratterebbe di una pipeline di tipo <code>sentiment-analysis</code>." }, { text: "Genera e restituisce testo che completa la frase di partenza.", explain: "Sbagliato! Se così fosse, si tratterebbe di una pipeline di tipo <code>text-generation</code>.", }, { text: "Restituisce i termini che rappresentano persone, organizzazioni o luoghi.", explain: "Inoltre, grazie a <code>grouped_entities=True</code>, la pipeline è in grado di raggruppare le parole che appartengono alla stessa entità, come \"Hugging Face\".", correct: true } ]} /> ### 3. Cosa dovrebbe rimpiazzare "..." in questo estratto di codice? ```py from transformers import pipeline filler = pipeline("fill-mask", model="bert-base-cased") result = filler("...") ``` <Question choices={[ { text: "Questo &#60;mask> aspetta te.", explain: "Sbagliato. Controlla la card del modello <code>bert-base-cased</code> e cerca di capire il tuo errore." }, { text: "Questo [MASK] aspetta te.", explain: "Corretto! Il mask token utilizzato dal modello è [MASK].", correct: true }, { text: "Questo signore aspetta te.", explain: "Sbagliato. Questa pipeline completa parole nascoste, quindi necessita di un mask token nell'input." } ]} /> ### 4. Perché questo codice non funziona? ```py from transformers import pipeline classifier = pipeline("zero-shot-classification") result = classifier("This is a course about the Transformers library") ``` <Question choices={[ { text: "Questa pipeline richiede che le etichette siano fornite per poter classificare il testo.", explain: "Esatto! Per essere corretto, il codice deve includere <code>candidate_labels=[...]</code>.", correct: true }, { text: "Questa pipeline richiede diverse frasi, non solo una.", explain: "Sbagliato, anche se quando usata correttamente, questa pipeline può tranquillamente processare una lista di frasi (come tutte le altre pipeline)." }, { text: "Come al solito, la libreria Transformer di 🤗 non funziona.", explain: "Ci rifiutiamo di commentare la tua risposta!" 
}, { text: "Questa pipeline richiede un input più lungo. Quello fornito è troppo corto.", explain: "Sbagliato. Sappi che per processare testi molto lunghi, questa pipeline li deve troncare." } ]} /> ### 5. Cosa significa "transfer learning"? <Question choices={[ { text: "Trasferire la conoscenza di un modello pre-addestrato a un nuovo modello, addestrando quest'ultimo sulla stessa banca dati.", explain: "No, in quel caso avremmo a che fare con due versioni dello stesso modello." }, { text: "Trasferire la conoscenza di un modello pre-addestrato a un nuovo modello addestrando il secondo con i pesi del primo.", explain: "Corretto. Quando il secondo modello viene addestrato ad un nuovo compito, *trasferisce* la conoscenza del primo modello.", correct: true }, { text: "Trasferire la conoscenza di un modello pre-addestrato a un nuovo modello costruendo il secondo con la stessa architettura del primo.", explain: "L'architettuta è semplicemente il modo in cui il modello è costruito. In questo caso, la conoscenza non è né condivisa né trasmessa." } ]} /> ### 6. Vero o falso? Solitamente un modello linguistico non richiede etichette in fase di pre-addestramento. <Question choices={[ { text: "Vero", explain: "Solitamente, il pre-addestramento è <em>self-supervised</em>, il che significa che le etichette sono create direttamente a partire dall'input (come quando una pipeline predice la parola seguente o indovina parole nascoste).", correct: true }, { text: "Falso", explain: "La risposta non è corretta." } ]} /> ### 7. Seleziona la frase che meglio descrive i termini "modello," "architettura," e "pesi." <Question choices={[ { text: "Se un modello fosse un palazzo, l'architettura ne sarebbe il progetto e i pesi le persone che ci vivono.", explain: "In un caso del genere, i pesi sarebbero piuttosto i mattoni e i materiali utilizzati per costruire il palazzo." }, { text: "L'architettura è una cartina per costruire un dato modello, i cui pesi sono le città rappresentate sulla mappa.", explain: "Il problema di questa metafora è che, solitamente, una cartina rappresenta un'unica realtà (c'è una sola città in Francia di nome Parigi). Per una data architettura, possono esistere numerosi pesi." }, { text: "Un'architettura è una successione di funzioni matematiche usate per costruire un modello; i suoi pesi sono i parametri delle funzioni.", explain: "Lo stesso insieme di funzioni matematiche (architettura) può essere utilizzato per costruire modelli diversi utilizzando parametri diversi (pesi).", correct: true } ]} /> ### 8. Quale dei seguenti modelli utilizzeresti per completare dei prompt con testo generato? <Question choices={[ { text: "Un modello encoder", explain: "Un modello encoder genera una rappresentazione della frase intera che è più adatta a compiti come la classificazione." }, { text: "Un modello decoder", explain: "I modelli decoder sono perfetti per generare testo a partire da un prompt.", correct: true }, { text: "Un modello sequence-to-sequence", explain: "I modelli sequence-to-sequence sono più adatti a compiti di generazione di frasi a partire da frasi input, non da un prompt." } ]} /> ### 9. Quale dei seguenti modelli utilizzeresti per riassumere testi? <Question choices={[ { text: "Un modello encoder", explain: "Un modello encoder genera una rappresentazione della frase intera che è più adatta a compiti come la classificazione." 
}, { text: "Un modello decoder", explain: "I modelli decoder sono capaci di generare un testo di output (come un riassunto), ma non sono in grado di estrarre conoscenze da un contesto, come ad esempio da un testo da riassumere." }, { text: "Un modello sequence-to-sequence", explain: "I modelli sequence-to-sequence sono perfetti per compiti di riassunto.", correct: true } ]} /> ### 10. Quale dei seguenti modelli utilizzeresti per classificare input testuali sulla base di determinate etichette? <Question choices={[ { text: "Un modello encoder", explain: "Un modello encoder genera una rappresentazione della frase intera che è più adatta a compiti come la classificazione.", correct: true }, { text: "Un modello decoder", explain: "I modelli decoder sono capaci di generare un testo di output, non di estrarre etichette da frasi." }, { text: "Un modello sequence-to-sequence", explain: "I modelli sequence-to-sequence sono più adatti a compiti in cui si genera testo sulla base di una frase input, non di un'etichetta.", } ]} /> ### 11. Qual è la possibile origine di un bias osservato in un modello? <Question choices={[ { text: "Il modello è una versione affinata di un modello pre-addestrato, dal quale eredita il bias.", explain: "Nel Transfer Learning, i bias di un modello pre-addestrato si osservano anche nel modello affinato.", correct: true }, { text: "I dati sui quali il modello è stato addetsrato contengono errori.", explain: "Questa è la fonte di errori più comune, ma non la sola.", correct: true }, { text: "La misura ottimizzata dal modello è errata.", explain: "Il modo in cui il modello viene addestrato è una causa di errori meno ovvia. Un modello ottimizza ciecamente qualsiasi metrica tu scelga, senza ripensamenti.", correct: true } ]} />
course/chapters/it/chapter1/10.mdx/0
{ "file_path": "course/chapters/it/chapter1/10.mdx", "repo_id": "course", "token_count": 3549 }
120
<FrameworkSwitchCourse {fw} /> # Introduzione Nel [Capitolo 2](/course/chapter2) abbiamo scoperto come utilizzare i tokenizzatori e i modelli preaddestrati per effettuare delle predizioni. Ma cosa fare se si vuole affinare un modello preaddestrato col tuo dataset? Lo scopriremo in questo capitolo! Impareremo: {#if fw === 'pt'} * Come preparare un grande dataset dall'Hub * Come usare l'API di alto livello `Trainer` per affinare un modello * Come usare un ciclo di addestramento personalizzato * Come utilizzare la libreria 🤗 Accelerate per eseguire facilmente quel ciclo di addestramento personalizzato su qualsiasi sistema distribuito {:else} * Come preparare un grande dataset dall'Hub * Come usare Keras per affinare un modello * Come usare Keras per ottenere delle predizioni * Come usare una metrica personalizzata {/if} Per caricare i checkpoint di addestramento sull'Hub di Hugging Face è necessario un account huggingface.co: [creare un account](https://huggingface.co/join)
course/chapters/it/chapter3/1.mdx/0
{ "file_path": "course/chapters/it/chapter3/1.mdx", "repo_id": "course", "token_count": 342 }
121
# Big data? Ci pensa 🤗 Datasets!

<CourseFloatingBanner chapter={5}
  classNames="absolute z-10 right-0 top-0"
  notebooks={[
    {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/it/chapter5/section4.ipynb"},
    {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/it/chapter5/section4.ipynb"},
]} />

Al giorno d'oggi non è raro trovarsi a lavorare con dataset di diversi gigabyte, soprattutto quando si vuole addestrare un transformer come BERT o GPT-2 da zero. In questi casi, persino _caricare_ i dati può essere un'impresa difficile. Ad esempio, il corpus WebText utilizzato per preaddestrare GPT-2 contiene più di 8 milioni di documenti e 40 GB di testo -- caricare un dataset del genere sulla RAM del tuo portatile gli farebbe venire un colpo!

Per fortuna, 🤗 Datasets è stato sviluppato per superare queste limitazioni, e può risolvere i problemi relativi alla gestione della memoria trattando i dataset come file _memory-mapped_, e quelli relativi ai limiti del disco rigido attraverso lo _stream processing_ delle voci del corpus.

<Youtube id="JwISwTCPPWo"/>

In questa sezione esploreremo queste funzionalità di 🤗 Datasets con un enorme corpus di 825 GB conosciuto come [Pile](https://pile.eleuther.ai). Iniziamo!

## Cos'è Pile?

The Pile è un corpus testuale creato da [EleutherAI](https://www.eleuther.ai) per addestrare modelli di linguaggio su grande scala. Include una grande varietà di dataset, a partire da articoli scientifici, repository di codice da GitHub, e testi dal web filtrati. Il corpus di addestramento è disponibile in [frammenti da 14 GB](https://the-eye.eu/public/AI/pile/), ed è possibile scaricare diverse delle [componenti singole](https://the-eye.eu/public/AI/pile_preliminary_components/). Iniziamo dando uno sguardo al dataset PubMed Abstracts, un corpus di abstract da 15 milioni di pubblicazioni in ambito biomedico da [PubMed](https://pubmed.ncbi.nlm.nih.gov/). Il dataset è in [formato JSON Lines](https://jsonlines.org) ed è stato compresso usando la libreria `zstandard`, per cui dobbiamo prima installarla:

```py
!pip install zstandard
```

Ora possiamo caricare il dataset utilizzando il metodo per file remoti che abbiamo visto nella [sezione 2](/course/chapter5/2):

```py
from datasets import load_dataset

# Ci vuole qualche minuto per l'esecuzione, quindi preparati un tè o un caffè nell'attesa :)
data_files = "https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst"
pubmed_dataset = load_dataset("json", data_files=data_files, split="train")
pubmed_dataset
```

```python out
Dataset({
    features: ['meta', 'text'],
    num_rows: 15518009
})
```

Possiamo vedere che ci sono 15.518.009 righe e 2 colonne nel nostro dataset -- un bel po'!

<Tip>

✎ Di base, 🤗 Datasets decomprimerà i file necessari a caricare un dataset. Se vuoi risparmiare sullo spazio dell'hard disk, puoi passare `DownloadConfig(delete_extracted=True)` all'argomento `download_config` di `load_dataset()`. Per maggiori dettagli leggi la [documentazione](https://huggingface.co/docs/datasets/package_reference/builder_classes#datasets.DownloadConfig).
</Tip>

Ispezioniamo i contenuti del primo esempio:

```py
pubmed_dataset[0]
```

```python out
{'meta': {'pmid': 11409574, 'language': 'eng'},
 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'}
```

Okay, questo sembra proprio l'abstract di un articolo di medicina. Ora vediamo quanta RAM è stata usata per caricare il dataset!

## La magia del memory mapping

Un modo semplice per calcolare l'uso di memoria su Python è utilizzare la libreria [`psutil`](https://psutil.readthedocs.io/en/latest/), che può essere installata con `pip` come segue:

```python
!pip install psutil
```

`psutil` offre una classe `Process` che permette di controllare l'utilizzo della memoria del processo attuale come segue:

```py
import psutil

# Process.memory_info mostra i dati in byte, quindi convertiamo in megabyte
print(f"RAM used: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB")
```

```python out
RAM used: 5678.33 MB
```

L'attributo `rss` qui fa riferimento alla _grandezza del resident set_, che equivale alla frazione di memoria che il processo occupa nella RAM. Questo valore include inoltre la memoria utilizzata dall'interprete Python e dalle librerie caricate, per cui l'ammontare effettivo utilizzato per caricare il dataset è un po' più piccolo. Per fare un confronto, vediamo quant'è grande il dataset su disco, utilizzando l'attributo `dataset_size`. Come prima, il risultato è espresso in byte, e abbiamo bisogno di convertirlo in gigabyte:

```py
print(f"Number of files in dataset : {pubmed_dataset.dataset_size}")
size_gb = pubmed_dataset.dataset_size / (1024**3)
print(f"Dataset size (cache file) : {size_gb:.2f} GB")
```

```python out
Number of files in dataset : 20979437051
Dataset size (cache file) : 19.54 GB
```

Bene -- nonostante sia grande quasi 20 GB, siamo in grado di caricare e accedere al dataset utilizzando molta meno RAM!

<Tip>

✏️ **Provaci tu!** Scegli uno dei [subset](https://the-eye.eu/public/AI/pile_preliminary_components/) di Pile che è più grande della RAM del tuo PC o del tuo portatile, caricalo utilizzando 🤗 Datasets e calcola la quantità di RAM utilizzata. Nota che per avere un valore preciso, dovrai farlo in un nuovo processo. Puoi trovare le grandezze decompresse di ogni subset nella Tavola 1 dell'[articolo su Pile](https://arxiv.org/abs/2101.00027).

</Tip>

Se hai dimestichezza con Pandas, questo risultato potrebbe sorprenderti, vista la famosa [regola empirica di Wes McKinney](https://wesmckinney.com/blog/apache-arrow-pandas-internals/), ovvero che, in linea di massima, serve una RAM 5-10 volte più grande del dataset che vuoi caricare. Come fa 🤗 Datasets a risolvere questo problema di gestione della memoria? 🤗 Datasets tratta ogni dataset come un [file mappato in memoria](https://it.wikipedia.org/wiki/File_mappato_in_memoria), il che permette di avere un mapping tra la RAM e l'archiviazione dei file di sistema, che permette alla libreria di accedere e operare su elementi del dataset senza doverli caricare completamente in memoria. I file mappati in memoria possono inoltre essere condivisi su più processi, il che permette a metodi come `Dataset.map()` di essere eseguiti in parallelo senza bisogno di spostare o copiare il dataset.
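A titolo puramente illustrativo, ecco come potrebbe apparire una chiamata parallela a `Dataset.map()` sul dataset caricato qui sopra (il numero di processi e la funzione applicata sono scelte ipotetiche):

```py
def lowercase_text(example):
    # Funzione d'esempio: rende minuscolo il campo "text"
    return {"text": example["text"].lower()}


# `num_proc` suddivide il lavoro tra più processi senza copiare il dataset,
# proprio grazie al memory mapping descritto qui sopra.
lowercased_dataset = pubmed_dataset.map(lowercase_text, num_proc=4)
```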
Dietro le quinte, tutto ciò è realizzato dal formato di memoria [Apache Arrow](https://arrow.apache.org) e dalla libreria [`pyarrow`](https://arrow.apache.org/docs/python/index.html), che rendono più veloci il caricamento e il processamento dei dati. (Per maggiori dettagli su Apache Arrow, e per un confronto con Pandas, dai un'occhiata al [post di Dejan Simic](https://towardsdatascience.com/apache-arrow-read-dataframe-with-zero-memory-69634092b1a).)

Per vederlo in azione, eseguiamo un piccolo test di velocità con un loop su tutti gli elementi nel dataset PubMed Abstracts:

```py
import timeit

code_snippet = """batch_size = 1000

for idx in range(0, len(pubmed_dataset), batch_size):
    _ = pubmed_dataset[idx:idx + batch_size]
"""

time = timeit.timeit(stmt=code_snippet, number=1, globals=globals())
print(
    f"Iterated over {len(pubmed_dataset)} examples (about {size_gb:.1f} GB) in "
    f"{time:.1f}s, i.e. {size_gb/time:.3f} GB/s"
)
```

```python out
'Iterated over 15518009 examples (about 19.5 GB) in 64.2s, i.e. 0.304 GB/s'
```

Abbiamo usato il modulo di Python `timeit` per calcolare il tempo di esecuzione impiegato da `code_snippet`. Tipicamente l'iterazione su un dataset procede a una velocità che va da qualche decimo di GB al secondo a diversi GB al secondo. Questo funziona perfettamente per la maggior parte delle applicazioni, ma a volte avrai bisogno di lavorare con un dataset che è troppo grande persino per essere salvato sul disco del tuo portatile. Ad esempio, se cercassimo di scaricare Pile per intero, avremmo bisogno di 825 GB di spazio libero su disco! In questi casi, 🤗 Datasets permette di utilizzare processi di streaming che ci permettono di scaricare e accedere al volo ai dati, senza bisogno di scaricare l'intero dataset. Diamo un'occhiata a come funziona.

<Tip>

💡 Nei notebook Jupyter, puoi cronometrare le celle utilizzando la [funzione magica `%%timeit`](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit).

</Tip>

## Streaming di dataset

Per abilitare lo streaming dei dataset devi semplicemente passare l'argomento `streaming=True` alla funzione `load_dataset()`. Ad esempio, carichiamo un'altra volta il dataset PubMed Abstracts, ma in modalità streaming:

```py
pubmed_dataset_streamed = load_dataset(
    "json", data_files=data_files, split="train", streaming=True
)
```

Invece del solito `Dataset` che abbiamo incontrato in precedenza in questo capitolo, l'oggetto ritornato con `streaming=True` è un `IterableDataset`. Come suggerito dal nome, per accedere agli elementi di un `IterableDataset` dobbiamo iterare su di esso. Possiamo accedere al primo elemento del nostro dataset in streaming come segue:

```py
next(iter(pubmed_dataset_streamed))
```

```python out
{'meta': {'pmid': 11409574, 'language': 'eng'},
 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'}
```

Gli elementi di un dataset in streaming possono essere processati al volo utilizzando `IterableDataset.map()`, che è utile durante l'addestramento se hai bisogno di tokenizzare gli input.
Il processo è uguale a quello che abbiamo utilizzato per tokenizzare il nostro dataset nel [Capitolo 3](/course/chapter3), con l'unica differenza che ora gli output vengono restituiti uno alla volta:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
tokenized_dataset = pubmed_dataset_streamed.map(lambda x: tokenizer(x["text"]))
next(iter(tokenized_dataset))
```

```python out
{'input_ids': [101, 4958, 5178, 4328, 6779, ...], 'attention_mask': [1, 1, 1, 1, 1, ...]}
```

<Tip>

💡 Per velocizzare la tokenizzazione con lo streaming puoi passare `batched=True`, come abbiamo visto nell'ultima sezione. Questo processerà gli esempi per batch. Di default, la grandezza di un batch è 1.000, e può essere specificata attraverso l'argomento `batch_size`.

</Tip>

È anche possibile mescolare un dataset in streaming utilizzando `IterableDataset.shuffle()`, ma a differenza di `Dataset.shuffle()`, questo metodo mescola solo gli elementi in un `buffer_size` predefinito:

```py
shuffled_dataset = pubmed_dataset_streamed.shuffle(buffer_size=10_000, seed=42)
next(iter(shuffled_dataset))
```

```python out
{'meta': {'pmid': 11410799, 'language': 'eng'},
 'text': 'Randomized study of dose or schedule modification of granulocyte colony-stimulating factor in platinum-based chemotherapy for elderly patients with lung cancer ...'}
```

In questo esempio, abbiamo selezionato un esempio casuale dai primi 10.000 esempi nel buffer. Una volta che accediamo a un esempio, il suo posto nel buffer è subito occupato dall'esempio successivo nel corpus (in questo caso l'esempio 10.001). Puoi inoltre selezionare gli elementi da un dataset in streaming utilizzando le funzioni `IterableDataset.take()` e `IterableDataset.skip()`, che funzionano un po' come `Dataset.select()`. Ad esempio, per selezionare i primi 5 esempi nel dataset PubMed Abstracts possiamo fare come segue:

```py
dataset_head = pubmed_dataset_streamed.take(5)
list(dataset_head)
```

```python out
[{'meta': {'pmid': 11409574, 'language': 'eng'},
  'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'},
 {'meta': {'pmid': 11409575, 'language': 'eng'},
  'text': 'Clinical signs of hypoxaemia in children with acute lower respiratory infection: indicators of oxygen therapy ...'},
 {'meta': {'pmid': 11409576, 'language': 'eng'},
  'text': "Hypoxaemia in children with severe pneumonia in Papua New Guinea ..."},
 {'meta': {'pmid': 11409577, 'language': 'eng'},
  'text': 'Oxygen concentrators and cylinders ...'},
 {'meta': {'pmid': 11409578, 'language': 'eng'},
  'text': 'Oxygen supply in rural africa: a personal experience ...'}]
```

Allo stesso modo, è possibile utilizzare la funzione `IterableDataset.skip()` per creare sezioni di addestramento e di validazione da un dataset mescolato, come segue:

```py
# Salta i primi 1.000 esempi, il resto viene incluso nell'insieme di addestramento
train_dataset = shuffled_dataset.skip(1000)
# Includi i primi 1.000 esempi nell'insieme di validazione
validation_dataset = shuffled_dataset.take(1000)
```

Concludiamo la nostra ricognizione dello streaming di dataset con un'applicazione comune: la combinazione di più dataset per creare un unico corpus. 🤗 Datasets fornisce una funzione `interleave_datasets()`, che converte una lista di oggetti `IterableDataset` in un unico `IterableDataset`, dove gli elementi del nuovo dataset sono ottenuti alternando tra gli esempi forniti.
Questa funzione è particolarmente utile quando cerchiamo di combinare dataset di grandi dimensioni. Ad esempio, possiamo utilizzare in streaming la sezione FreeLaw del Pile, un dataset di 51 GB di pareri legali dei tribunali statunitensi:

```py
law_dataset_streamed = load_dataset(
    "json",
    data_files="https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst",
    split="train",
    streaming=True,
)
next(iter(law_dataset_streamed))
```

```python out
{'meta': {'case_ID': '110921.json',
  'case_jurisdiction': 'scotus.tar.gz',
  'date_created': '2010-04-28T17:12:49Z'},
 'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'}
```

Questo dataset è abbastanza grande da mettere sotto sforzo la RAM di molti portatili, ma siamo riusciti a caricarlo e accedervi senza alcun problema! Ora combiniamo gli esempi di FreeLaw e di PubMed Abstracts con la funzione `interleave_datasets()`:

```py
from itertools import islice
from datasets import interleave_datasets

combined_dataset = interleave_datasets([pubmed_dataset_streamed, law_dataset_streamed])
list(islice(combined_dataset, 2))
```

```python out
[{'meta': {'pmid': 11409574, 'language': 'eng'},
  'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'},
 {'meta': {'case_ID': '110921.json',
   'case_jurisdiction': 'scotus.tar.gz',
   'date_created': '2010-04-28T17:12:49Z'},
  'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'}]
```

Abbiamo utilizzato la funzione `islice()` del modulo Python `itertools` per selezionare i primi due esempi dai dataset combinati, e abbiamo visto che corrispondono ai primi esempi di ognuno dei due dataset originali.

Infine, se vuoi processare il Pile in streaming, in tutti i suoi 825 GB, puoi recuperare tutti i file preparati, come segue:

```py
base_url = "https://the-eye.eu/public/AI/pile/"
data_files = {
    "train": [base_url + "train/" + f"{idx:02d}.jsonl.zst" for idx in range(30)],
    "validation": base_url + "val.jsonl.zst",
    "test": base_url + "test.jsonl.zst",
}
pile_dataset = load_dataset("json", data_files=data_files, streaming=True)
next(iter(pile_dataset["train"]))
```

```python out
{'meta': {'pile_set_name': 'Pile-CC'},
 'text': 'It is done, and submitted. You can play “Survival of the Tastiest” on Android, and on the web...'}
```

<Tip>

✏️ **Prova tu!** Usa uno dei corpora Common Crawl come [`mc4`](https://huggingface.co/datasets/mc4) oppure [`oscar`](https://huggingface.co/datasets/oscar) per creare un dataset multilingue in streaming, che rappresenta le proporzioni delle lingue parlate in un paese a tua scelta. Ad esempio, le quattro lingue ufficiali in Svizzera sono il tedesco, il francese, l'italiano e il romancio, per cui potresti creare un corpus della Svizzera raccogliendo i campioni da Oscar, secondo la percentuale di parlanti di ognuna (più sotto trovi uno schizzo di partenza).
</Tip> Ora hai a tua disposizione tutti gli strumenti per caricare e processare dataset di ogni tipo -- ma a meno che tu non sia estremamente fortunato, arriverà un momento nel tuo cammino in cui dovrai effettivamente creare un dataset per risolvere i tuoi problemi. Questo sarà argomento della prossima sezione!
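Per chi volesse partire dall'esercizio proposto nel suggerimento qui sopra, ecco uno schizzo minimo e non testato: i nomi delle configurazioni di OSCAR e le proporzioni dei parlanti sono puramente indicativi e vanno verificati e adattati.

```py
from datasets import interleave_datasets, load_dataset

# Carichiamo in streaming le quattro lingue ufficiali della Svizzera
# (nomi delle configurazioni di OSCAR da verificare)
configs = [
    "unshuffled_deduplicated_de",
    "unshuffled_deduplicated_fr",
    "unshuffled_deduplicated_it",
    "unshuffled_deduplicated_rm",
]
streams = [
    load_dataset("oscar", config, split="train", streaming=True) for config in configs
]

# Alterniamo gli esempi secondo proporzioni (indicative) dei parlanti
swiss_corpus = interleave_datasets(streams, probabilities=[0.62, 0.23, 0.08, 0.07], seed=42)
next(iter(swiss_corpus))
```

L'argomento `probabilities` di `interleave_datasets()` controlla con quale frequenza vengono estratti gli esempi da ciascun dataset, così da rispettare le proporzioni desiderate.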
course/chapters/it/chapter5/4.mdx/0
{ "file_path": "course/chapters/it/chapter5/4.mdx", "repo_id": "course", "token_count": 6321 }
122
- title: 0. セットアップ sections: - local: chapter0/1 title: イントロダクション - title: 1. Transformerモデルについて sections: - local: chapter1/1 title: イントロダクション - local: chapter1/2 title: 自然言語処理 / NLP(Natural Language Processing) - local: chapter1/3 title: Transformersで何ができる? - local: chapter1/4 title: Transformersの仕組みについて - local: chapter1/5 title: エンコーダーモデル - local: chapter1/6 title: デコーダーモデル - local: chapter1/7 title: Sequence-to-sequence モデル - local: chapter1/8 title: バイアスと限界 - local: chapter1/9 title: まとめ - local: chapter1/10 title: 章末クイズ - title: 2. 🤗 Transformersの使用 sections: - local: chapter2/1 title: イントロダクション - local: chapter2/2 title: pipelineの裏側 - local: chapter2/3 title: モデル - local: chapter2/4 title: トークナイザ - local: chapter2/5 title: 複数系列の処理 - title: 4. モデルとトークナイザーの共有 sections: - local: chapter4/1 title: ハギングフェイスハブ - local: chapter4/2 title: 学習済みモデルを使う - local: chapter4/3 title: 学習済みモデルを共有する - local: chapter4/4 title: モデルカードを作成する - local: chapter4/5 title: パート1終了! - local: chapter4/6 title: チャプター修了クイズ quiz: 4 - title: 7. 主要な自然言語処理タスク sections: - local: chapter7/1 title: イントロダクション - local: chapter7/2 title: トークン分類 - local: chapter7/3 title: マスク言語モデルの微調整 - local: chapter7/4 title: 翻訳 - local: chapter7/5 title: 要約 - local: chapter7/6 title: 因果言語モデルを一から学習 - local: chapter7/7 title: 質問応答 - local: chapter7/8 title: NLPをマスター - local: chapter7/9 title: チャプター修了クイズ quiz: 7 - title: 8. 助けの求め方 sections: - local: chapter8/1 title: イントロダクション - local: chapter8/2 title: エラーを見つけた時に最初にすること - local: chapter8/6 title: 概要 - title: コースのイベント sections: - local: events/2 title: パート2公開記念イベント
course/chapters/ja/_toctree.yml/0
{ "file_path": "course/chapters/ja/_toctree.yml", "repo_id": "course", "token_count": 1107 }
123
<FrameworkSwitchCourse {fw} /> # 複数系列の処理 {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/en/chapter2/section5_tf.ipynb"}, ]} /> {/if} {#if fw === 'pt'} <Youtube id="M6adb1j2jPI"/> {:else} <Youtube id="ROxrFOEbsQE"/> {/if} 前のセクションでは、最も単純な使用例である、単一の短い系列(テキスト)に対して推論を行う方法を見てきました。しかし、これについて以下のような疑問をお持ちの方もいるかもしれません。 - 複数の系列をどのように処理するのか? - 長さの異なる複数の系列をどのように処理するのか? - モデルがうまく機能するためには、単語のインデックスだけが入力として必要なのか? - 系列が長すぎてしまうということはあるのか? これらの疑問について、実際はどのような問題があるのか、そして🤗 Transformers APIを使ってどのように解決できるのかを見ていきましょう。 ## モデルへのバッチ入力 以前のエクササイズで、系列が数値のリストに変換される方法を見てきました。この数値列をテンソルに変換し、モデルに入力してみましょう。 {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor(ids) # This line will fail. model(input_ids) ``` ```python out IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant(ids) # This line will fail. 
model(input_ids) ``` ```py out InvalidArgumentError: Input to reshape is a tensor with 14 values, but the requested shape has 196 [Op:Reshape] ``` {/if} おっと!「セクション2のパイプラインの手順に従ったのに、なぜ失敗したのか?」と思われるかもしれません。 この問題はモデルに単一の系列を入力しようとしたために発生しました。🤗 Transformersモデルは、デフォルトでは複数の系列を入力として受け付けます。ここでは、`sequence`に対してトークナイザを適用したときに、トークナイザがその背後で行ったすべての処理を行おうとしました。しかし、もう少し詳しく見てみると、トークナイザは入力IDのリストをテンソルに変換するだけでなく、それに対して次元を追加していることがわかります。 {#if fw === 'pt'} ```py tokenized_inputs = tokenizer(sequence, return_tensors="pt") print(tokenized_inputs["input_ids"]) ``` ```python out tensor([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]]) ``` {:else} ```py tokenized_inputs = tokenizer(sequence, return_tensors="tf") print(tokenized_inputs["input_ids"]) ``` ```py out <tf.Tensor: shape=(1, 16), dtype=int32, numpy= array([[ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102]], dtype=int32)> ``` {/if} それでは次元を追加して再度試してみましょう。 {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = torch.tensor([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." 
tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) input_ids = tf.constant([ids]) print("Input IDs:", input_ids) output = model(input_ids) print("Logits:", output.logits) ``` {/if} ここで入力IDと結果のロジット(モデルの出力)を見てみましょう。 {#if fw === 'pt'} ```python out Input IDs: [[ 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012]] Logits: [[-2.7276, 2.8789]] ``` {:else} ```py out Input IDs: tf.Tensor( [[ 1045 1005 2310 2042 3403 2005 1037 17662 12172 2607 2026 2878 2166 1012]], shape=(1, 14), dtype=int32) Logits: tf.Tensor([[-2.7276208 2.8789377]], shape=(1, 2), dtype=float32) ``` {/if} *バッチ処理*とは、複数の系列をまとめてモデルに入力することです。系列が1つしかない場合でも、バッチを構築することができます。 ``` batched_ids = [ids, ids] ``` これは2つの同じ系列からなるバッチとなっています。 <Tip> ✏️ **試してみよう!** この `batch_ids` をテンソルに変換し、モデルに入力してみましょう。前と同じロジット(モデル出力)が得られることを確認してください(ただし、二重になっていることに注意してください)。 </Tip> バッチ処理により、複数の系列をモデルに入力できるようになります。単一の系列でバッチを構築するのと同じように、簡単に複数の系列を使用することができます。ただし、ここで1つ問題があります。2つ以上の系列をバッチ処理する場合、系列の長さがそれぞれ異なる場合があります。これまでテンソルを扱ったことがある場合は、テンソルの形状は長方形である必要があることをご存知なのではないでしょうか。従って、異なる長さの系列の入力IDリストを直接テンソルに変換することはできません。この問題を回避するための方法として、入力を*パディング*することが一般的です。 ## 入力のパディング 以下の二重のリストはテンソルには変換できません。 ```py no-format batched_ids = [ [200, 200, 200], [200, 200] ] ``` この問題を回避するために、*パディング*を使用して、テンソルの形状を長方形にしてみましょう。パディングは、*パディングトークン*と呼ばれる特別な単語を短い系列に対して追加することで、すべての系列の長さを同じにします。例えば、10語の系列が10個、20語の系列が1個ある場合、パディングにより、すべての系列の長さが20語になります。上記の例では、結果として得られるテンソルは次のようになります。 ```py no-format padding_id = 100 batched_ids = [ [200, 200, 200], [200, 200, padding_id], ] ``` パティングトークンのIDは `tokenizer.pad_token_id` で見つけることができます。それでは、これを使って2つの系列を個別にモデルに入力する場合と、バッチ処理した場合の結果を比較してみましょう。 {#if fw === 'pt'} ```py no-format model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] print(model(torch.tensor(sequence1_ids)).logits) print(model(torch.tensor(sequence2_ids)).logits) print(model(torch.tensor(batched_ids)).logits) ``` ```python out tensor([[ 1.5694, -1.3895]], grad_fn=<AddmmBackward>) tensor([[ 0.5803, -0.4125]], grad_fn=<AddmmBackward>) tensor([[ 1.5694, -1.3895], [ 1.3373, -1.2163]], grad_fn=<AddmmBackward>) ``` {:else} ```py no-format model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequence1_ids = [[200, 200, 200]] sequence2_ids = [[200, 200]] batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] print(model(tf.constant(sequence1_ids)).logits) print(model(tf.constant(sequence2_ids)).logits) print(model(tf.constant(batched_ids)).logits) ``` ```py out tf.Tensor([[ 1.5693678 -1.3894581]], shape=(1, 2), dtype=float32) tf.Tensor([[ 0.5803005 -0.41252428]], shape=(1, 2), dtype=float32) tf.Tensor( [[ 1.5693681 -1.3894582] [ 1.3373486 -1.2163193]], shape=(2, 2), dtype=float32) ``` {/if} バッチ処理した予測のロジットについて何か違いがあるようです。2行目は2つ目の系列のロジットと同じであるべきですが、完全に異なる値となってしまっています! 
これは、Transformerモデルの代表的な特徴であるアテンション層が、それぞれのトークンに対して*コンテクスト化*を行っていることに起因します。アテンション層は、系列のすべてのトークンに注意(アテンション)を向けるため、パディングトークンも考慮の対象として扱います。異なる長さの系列を個別にモデルに入力する場合と、同じ系列をバッチ処理した場合の両方で同じ結果を得るためには、アテンション層にパディングトークンを無視するように指示する必要があります。これは、アテンションマスクを使用することで実現できます。 ## アテンションマスク *アテンションマスク*とは入力IDのテンソルと全く同じ形をしたテンソルのことで、0と1で構成されています。1は対応するトークンに注意を向けることを示し、0は対応するトークンに注意を向けないこと(つまり、アテンション層に無視されること)を示します。 前の例に対して、アテンションマスクを追加してみましょう。 {#if fw === 'pt'} ```py no-format batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] attention_mask = [ [1, 1, 1], [1, 1, 0], ] outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask)) print(outputs.logits) ``` ```python out tensor([[ 1.5694, -1.3895], [ 0.5803, -0.4125]], grad_fn=<AddmmBackward>) ``` {:else} ```py no-format batched_ids = [ [200, 200, 200], [200, 200, tokenizer.pad_token_id], ] attention_mask = [ [1, 1, 1], [1, 1, 0], ] outputs = model(tf.constant(batched_ids), attention_mask=tf.constant(attention_mask)) print(outputs.logits) ``` ```py out tf.Tensor( [[ 1.5693681 -1.3894582 ] [ 0.5803021 -0.41252586]], shape=(2, 2), dtype=float32) ``` {/if} これで、バッチ内の2つ目の系列について同じロジットが得られました。 2つ目の系列の最後の値がパディングIDであることに注目してください。これは、アテンションマスクの0の値となっています。 <Tip> ✏️ **試してみよう!** セクション2で使用した2つの文 ("I've been waiting for a HuggingFace course my whole life." と "I hate this so much!") を手動でトークン化してみましょう。そしてこれらをモデルに入力し、セクション2で得られたロジットと同じ結果となることを確認してみましょう。次に、パディングトークンを使用してこれらをバッチ処理し、適切なアテンションマスクを作成してみましょう。また同様にモデルに入力した際、セクション2で得られた結果と同じものになることを確認してみましょう。 </Tip> ## より長い系列 トランスフォーマーモデルでは、モデルに入力できる系列の長さに制限があります。ほとんどのモデルは512トークンまたは1024トークンの系列を処理できますが、これより長い系列を処理しようとするとクラッシュしてしまいます。この問題に対しては、2つの解決策があります。 - 長い系列を処理できるモデルを使用する - 系列を途中で区切って短くする 処理できる系列長はモデルによって異なり、非常に長い系列の処理に特化したモデルも存在します。[Longformer](https://huggingface.co/transformers/model_doc/longformer.html) はその一例です。また、[LED](https://huggingface.co/transformers/model_doc/led.html) も長い系列を処理できるモデルです。非常に長い系列を処理する必要があるタスクに取り組んでいる場合は、これらのモデルを見てみて下さい。 もう1つの手法として、`max_sequence_length` パラメータを指定して系列を途中で区切ることをお勧めします。 ```py sequence = sequence[:max_sequence_length] ```
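参考までに、上の「試してみよう」に取り組む際の出発点となる最小限のスケッチを示します(PyTorch を使用し、本章と同じ `distilbert-base-uncased-finetuned-sst-2-english` チェックポイントを前提とした一例です)。

```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

sentences = [
    "I've been waiting for a HuggingFace course my whole life.",
    "I hate this so much!",
]

# 各文を手動でトークン化し、IDのリストに変換する
ids = [tokenizer.convert_tokens_to_ids(tokenizer.tokenize(s)) for s in sentences]

# 短い系列をパディングトークンで埋め、対応するアテンションマスクを作成する
max_len = max(len(seq) for seq in ids)
batched_ids = [seq + [tokenizer.pad_token_id] * (max_len - len(seq)) for seq in ids]
attention_mask = [[1] * len(seq) + [0] * (max_len - len(seq)) for seq in ids]

outputs = model(torch.tensor(batched_ids), attention_mask=torch.tensor(attention_mask))
print(outputs.logits)
```

アテンションマスクでパディング位置を無視させているため、各文を個別にモデルへ入力した場合と同じロジットが得られるはずです。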
course/chapters/ja/chapter2/5.mdx/0
{ "file_path": "course/chapters/ja/chapter2/5.mdx", "repo_id": "course", "token_count": 6319 }
124
# イントロダクション <CourseFloatingBanner chapter={8} classNames="absolute z-10 right-0 top-0" /> 🤗 Transformers を使いながらNLPタスクに取り組む方法がわかった後、自分自身のプロジェクトを簡単に始めることができます! この章では、エラーにぶつかったときにどうすればよいかを深く探ります。自分のコードやトレーニングを正確にデバッグする方法、そして自分でエラーを解決できない場合にコミュニティに助けを求める方法を一緒に学びましょう。また、HuggingFace (ハギングフェイス) ライブラリのひとつにバグを見つけたと思ったら、その問題をできるだけ早く解決するため報告する方法を紹介します。 この章では、一緒に次のことを学びます: - エラーを見つけた時に最初にすること - [ハギングフェイス フォーラム](https://discuss.huggingface.co/)の中で助けの求め方 - トレーニングパイプラインのデバグ方法 - 良いGitHubイシューの書き方 もちろん、このどれもが🤗 Transformersやハギングフェイスのエコシステムと特別な関係はありません。この章から得られる教訓は、ほとんどのオープンソースプロジェクトに適用可能です
course/chapters/ja/chapter8/1.mdx/0
{ "file_path": "course/chapters/ja/chapter8/1.mdx", "repo_id": "course", "token_count": 615 }
125
# 단원 소개[[introduction]] <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" /> [제1단원](/course/chapter1)에서 보았듯이, 트랜스포머 모델은 대부분 매우 큽니다. 수백만에서 *수백억*개의 파라미터를 가진 모델을 훈련시키고 배포하는 것은 만만치 않은데다가, 하루가 멀다하고 자체적으로 구현된 새로운 모델이 출시되어서, 모두 적용해보려고 한다면 쉽지는 않을 거예요. 🤗 Transformers 라이브러리는 이 문제를 해결하기 위해 만들어졌습니다. Transformer 모델을 가져오고, 훈련시킨 후 저장할 수 있는 단일 API를 제공하는 것이 목표예요. 라이브러리의 주요 기능은 다음과 같습니다. - **사용 편의성**: 추론하기 위해 최첨단 NLP 모델을 다운로드한 다음 적재시켜 사용하고 싶다면, 단 2줄의 코드만으로 할 수 있어요. - **유연성**: 기초적으로 보면 모든 모델은 단순한 PyTorch `nn.module` 또는 TensorFlow `tf.keras.Model` 클래스입니다. 각 머신러닝(ML) 프레임워크의 여타 다른 모델이나 마찬가지로 처리할 수 있다는 뜻이에요. - **단순성**: 라이브러리 위에 추상화를 거의 하지 않았어요. "모든 것을 파일 하나에"가 핵심 개념입니다. 모델의 순전파(forward propagation) 부분이 파일 한 개에 모두 정의되어 있어서, 코드 자체를 이해하고 해킹할 수도 있어요. 마지막 기능은 여타 ML 라이브러리들과는 다른 🤗 Transformers만의 차별점입니다. 모델은 파일 간에 공유되는 모듈로 만들어지지 않고, 모델마다 자체적인 레이어를 쌓습니다. 이렇게 하면 모델을 더 쉽게 보고 이해할 수 있으면서도, 다른 모델과는 상관없이 원하는 모델에서 마음껏 실험해볼 수 있습니다. 이 단원은 모델과 토크나이저로 [제1단원](/course/chapter1)에서 소개된 `pipeline()` 함수를 처음부터 끝까지 만들어보는 것으로 시작합니다. 만들고나면 모델 API를 더 깊게 탐구해봅니다. model과 configuration 클래스를 알아보고, 모델을 적재하는 방법과 수치를 입력으로 제공해서 예측이 출력되는 처리 과정을 보여드리겠습니다. 그런 다음 `pipeline()` 함수의 중요한 구성요소인 tokenizer API를 살펴보겠습니다. tokenizer는 처리의 첫 번째 단계인 텍스트를 신경망의 수치 입력으로 바꾸는 부분과 필요할 때 다시 텍스트로 바꾸는 마지막 단계, 즉 양끝을 다룹니다. 마지막으로 여러 문장을 묶어서 모델에게 제공하는 방법을 알아보고, 기존 `tokenizer()` 함수를 자세히 살펴봄으로써 마무리짓겠습니다. <Tip> ⚠️ Model Hub와 🤗 Transformers에서 사용할 수 있는 모든 기능을 활용하려면 <a href="https://huggingface.co/join">계정을 만드는 게</a> 좋습니다. </Tip>
course/chapters/ko/chapter2/1.mdx/0
{ "file_path": "course/chapters/ko/chapter2/1.mdx", "repo_id": "course", "token_count": 2406 }
126
# 2단원 완료! <CourseFloatingBanner chapter={8} classNames="absolute z-10 right-0 top-0" /> 축하합니다. 코스의 두 번째 부분을 통과했습니다! 세 번째 소식을 열심히 준비 중이니 [뉴스레터](https://huggingface.curated.co/)를 구독하여 소식을 놓치지 않도록 하세요. 이제 다양한 NLP 작업을 처리하고 이에 대한 모델을 파인 튜닝하거나 사전 학습할 수 있습니다. [모델 허브](https://huggingface.co/models)에서 커뮤니티와 결과를 공유하는 것을 잊지 마세요. 여러분이 얻은 지식으로 무엇을 구축할지 기대됩니다!
course/chapters/ko/chapter8/6.mdx/0
{ "file_path": "course/chapters/ko/chapter8/6.mdx", "repo_id": "course", "token_count": 479 }
127
<FrameworkSwitchCourse {fw} /> # Modelos {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter2/section3_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter2/section3_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter2/section3_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter2/section3_tf.ipynb"}, ]} /> {/if} {#if fw === 'pt'} <Youtube id="AhChOFRegn4"/> {:else} <Youtube id="d3JVgghSOew"/> {/if} {#if fw === 'pt'} Nesta seção, vamos analisar mais de perto a criação e a utilização de um modelo. Vamos utilizar a classe `AutoModel`, que é útil quando você quer instanciar qualquer modelo a partir de um checkpoint. A classe `AutoModel` e todas as classes filhas são na verdade simples wrapper sobre a grande variedade de modelos disponíveis na biblioteca. É um wrapper inteligente, pois pode automaticamente "adivinhar" a arquitetura apropriada do modelo para seu checkpoint, e então instancia um modelo com esta arquitetura. {:else} Nesta seção, vamos analisar mais de perto a criação e a utilização de um modelo. Vamos utilizar a classe `TFAutoModel`, que é útil quando você quer instanciar qualquer modelo a partir de um checkpoint. A classe `TFAutoModel` e todas as classes filhas são na verdade simples wrapper sobre a grande variedade de modelos disponíveis na biblioteca. É um wrapper inteligente, pois pode automaticamente "adivinhar" a arquitetura apropriada do modelo para seu checkpoint, e então instancia um modelo com esta arquitetura. {/if} Entretanto, se você conhece o tipo de modelo que deseja usar, pode usar diretamente a classe que define sua arquitetura. Vamos dar uma olhada em como isto funciona com um modelo BERT. ## Criando um Transformer A primeira coisa que precisamos fazer para inicializar um modelo BERT é carregar um objeto de configuração: {#if fw === 'pt'} ```py from transformers import BertConfig, BertModel # Construindo a configuração config = BertConfig() # Construindo o modelo a partir da configuração model = BertModel(config) ``` {:else} ```py from transformers import BertConfig, TFBertModel # Construindo a configuração config = BertConfig() # Construindo o modelo a partir da configuração model = TFBertModel(config) ``` {/if} A configuração contém muitos atributos que são usados para construir o modelo: ```py print(config) ``` ```python out BertConfig { [...] "hidden_size": 768, "intermediate_size": 3072, "max_position_embeddings": 512, "num_attention_heads": 12, "num_hidden_layers": 12, [...] } ``` Embora você ainda não tenha visto o que todos esses atributos fazem, você deve reconhecer alguns deles: o atributo `hidden_size` define o tamanho do vetor `hidden_states`, e o `num_hidden_layers` define o número de camadas que o Transformer possui. ### Diferentes métodos de inicializar o modelo A criação de um modelo a partir da configuração padrão o inicializa com valores aleatórios: {#if fw === 'pt'} ```py from transformers import BertConfig, BertModel config = BertConfig() model = BertModel(config) # O modelo é inicializado aleatoriamente! 
```
{:else}
```py
from transformers import BertConfig, TFBertModel

config = BertConfig()
model = TFBertModel(config)

# O modelo é inicializado aleatoriamente!
```
{/if}

O modelo pode ser utilizado neste estado, mas produzirá saídas errôneas; ele precisa ser treinado primeiro. Poderíamos treinar o modelo a partir do zero na tarefa em mãos, mas como você viu em [Capítulo 1](/course/pt/chapter1), isto exigiria muito tempo e muitos dados, e teria um impacto ambiental não negligenciável. Para evitar esforços desnecessários e duplicados, normalmente é possível compartilhar e reutilizar modelos que já foram treinados.

Carregar um Transformer já treinado é simples - podemos fazer isso utilizando o método `from_pretrained()`:

{#if fw === 'pt'}
```py
from transformers import BertModel

model = BertModel.from_pretrained("bert-base-cased")
```

Como você viu anteriormente, poderíamos substituir o `BertModel` pela classe equivalente ao `AutoModel`. Faremos isto de agora em diante, pois isto produz um código generalista a partir de um checkpoint; se seu código funciona para um checkpoint, ele deve funcionar perfeitamente com outro. Isto se aplica mesmo que a arquitetura seja diferente, desde que o checkpoint tenha sido treinado para uma tarefa semelhante (por exemplo, uma tarefa de análise de sentimento).

{:else}
```py
from transformers import TFBertModel

model = TFBertModel.from_pretrained("bert-base-cased")
```

Como você viu anteriormente, poderíamos substituir o `TFBertModel` pela classe equivalente ao `TFAutoModel`. Faremos isto de agora em diante, pois isto produz um código generalista a partir de um checkpoint; se seu código funciona para um checkpoint, ele deve funcionar perfeitamente com outro. Isto se aplica mesmo que a arquitetura seja diferente, desde que o checkpoint tenha sido treinado para uma tarefa semelhante (por exemplo, uma tarefa de análise de sentimento).

{/if}

No exemplo de código acima não utilizamos `BertConfig`, e em vez disso carregamos um modelo pré-treinado através do identificador `bert-base-cased`. Este é um checkpoint do modelo que foi treinado pelos próprios autores do BERT; você pode encontrar mais detalhes sobre ele em seu [model card](https://huggingface.co/bert-base-cased).

Este modelo agora é inicializado com todos os pesos do checkpoint. Ele pode ser usado diretamente para inferência sobre as tarefas nas quais foi treinado, e também pode ser *fine-tuned* (aperfeiçoado) em uma nova tarefa. Treinando com pesos pré-treinados e não do zero, podemos rapidamente alcançar bons resultados.

Os pesos foram baixados e armazenados em cache (logo, para as futuras chamadas do método `from_pretrained()` não será realizado o download novamente) em sua respectiva pasta, que tem como padrão o path *~/.cache/huggingface/transformers*. Você pode personalizar sua pasta de cache definindo a variável de ambiente `HF_HOME`.

O identificador usado para carregar o modelo pode ser o identificador de qualquer modelo no Model Hub, desde que seja compatível com a arquitetura BERT. A lista completa dos checkpoints BERT disponíveis pode ser encontrada [aqui](https://huggingface.co/models?filter=bert).
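Apenas para ilustrar o que foi dito acima, um esboço mínimo do mesmo carregamento usando a classe genérica ficaria assim (na variante PyTorch; com TensorFlow bastaria trocar por `TFAutoModel`):

```py
from transformers import AutoModel

# Carrega o mesmo checkpoint, mas sem fixar a arquitetura BERT no código
model = AutoModel.from_pretrained("bert-base-cased")
```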
### Métodos para salvar/armazenar o modelo Salvar um modelo é tão fácil quanto carregar um - utilizamos o método `save_pretrained()`, que é análogo ao método `from_pretrained()`: ```py model.save_pretrained("path_no_seu_computador") ``` Isto salva dois arquivos em seu disco: {#if fw === 'pt'} ``` ls path_no_seu_computador config.json pytorch_model.bin ``` {:else} ``` ls path_no_seu_computador config.json tf_model.h5 ``` {/if} Se você der uma olhada no arquivo *config.json*, você reconhecerá os atributos necessários para construir a arquitetura modelo. Este arquivo também contém alguns metadados, como a origem do checkpoint e a versão 🤗 Transformers que você estava usando quando salvou o checkpoint pela última vez. {#if fw === 'pt'} O arquivo *pytorch_model.bin* é conhecido como o *dicionário de estado*; ele contém todos os pesos do seu modelo. Os dois arquivos andam de mãos dadas; a configuração é necessária para conhecer a arquitetura de seu modelo, enquanto os pesos do modelo são os parâmetros de seu modelo. {:else} O arquivo *tf_model.h5* é conhecido como o *dicionário de estado*; ele contém todos os pesos do seu modelo. Os dois arquivos andam de mãos dadas; a configuração é necessária para conhecer a arquitetura de seu modelo, enquanto os pesos do modelo são os parâmetros de seu modelo. {/if} ## Usando um modelo de Transformer para inferência Agora que você sabe como carregar e salvar um modelo, vamos tentar usá-lo para fazer algumas predições. Os Transformers só podem processar números - números que o tokenizer gera. Mas antes de discutirmos os tokenizers, vamos explorar quais entradas o modelo aceita. Os Tokenizers podem se encarregar de lançar as entradas nos tensores da estrutura apropriada, mas para ajudá-lo a entender o que está acontecendo, vamos dar uma rápida olhada no que deve ser feito antes de enviar as entradas para o modelo. Digamos que temos um par de sequências: ```py sequences = ["Hello!", "Cool.", "Nice!"] ``` O tokenizer os converte em índices de vocabulário que são normalmente chamados de *IDs de entrada*. Cada sequência é agora uma lista de números! A saída resultante é: ```py no-format encoded_sequences = [ [101, 7592, 999, 102], [101, 4658, 1012, 102], [101, 3835, 999, 102], ] ``` Esta é uma lista de sequências codificadas: uma lista de listas. Os tensores só aceitam shapes (tamanhos) retangulares (pense em matrizes). Esta "matriz" já é de forma retangular, portanto, convertê-la em um tensor é fácil: {#if fw === 'pt'} ```py import torch model_inputs = torch.tensor(encoded_sequences) ``` {:else} ```py import tensorflow as tf model_inputs = tf.constant(encoded_sequences) ``` {/if} ### Usando os tensores como entradas para o modelo Fazer uso dos tensores com o modelo é extremamente simples - chamamos apenas o modelo com os inputs: ```py output = model(model_inputs) ``` Embora o modelo aceite muitos argumentos diferentes, apenas os IDs de entrada são necessários. Explicaremos o que os outros argumentos fazem e quando eles são necessários mais tarde, mas primeiro precisamos olhar mais de perto os tokenizers que constroem as entradas que um Transformer pode compreender.
course/chapters/pt/chapter2/3.mdx/0
{ "file_path": "course/chapters/pt/chapter2/3.mdx", "repo_id": "course", "token_count": 3709 }
128
# Big data? 🤗 Datasets ao resgate <CourseFloatingBanner chapter={5} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/pt/chapter5/section4.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/pt/chapter5/section4.ipynb"}, ]} /> Hoje em dia, não é incomum encontrar-se trabalhando com conjuntos de dados de vários gigabytes, especialmente se você planeja pré-treinar um transformer como BERT ou GPT-2 do zero. Nesses casos, até mesmo _carregar_ os dados pode ser um desafio. Por exemplo, o corpus WebText usado para pré-treinar o GPT-2 consiste em mais de 8 milhões de documentos e 40 GB de texto - carregar isso na RAM do seu laptop provavelmente lhe causará um ataque cardíaco! Felizmente, 🤗 Datasets foram projetados para superar essas limitações. Ele libera você de problemas de gerenciamento de memória tratando conjuntos de dados como arquivos _memory-mapped_ e de limites de disco rígido por _streaming_ das entradas em um corpus. <Youtube id="JwISwTCPPWo"/> Nesta seção, exploraremos esses recursos de 🤗 Conjuntos de dados com um enorme corpus de 825 GB conhecido como [the Pile](https://pile.eleuther.ai). Vamos começar! ## O que é the Pile? O `The Pile` é um corpus de texto em inglês que foi criado por [EleutherAI](https://www.eleuther.ai) para treinar modelos de linguagem em larga escala. Ele inclui uma gama diversificada de conjuntos de dados, abrangendo artigos científicos, repositórios de código do GitHub e texto da web filtrado. O corpus de treinamento está disponível em [blocos de 14 GB](https://the-eye.eu/public/AI/pile/), e você também pode baixar vários dos [componentes individuais](https://the-eye.eu/public/AI/pile_preliminary_components/). Vamos começar dando uma olhada no conjunto de dados PubMed Abstracts, que é um corpus de resumos de 15 milhões de publicações biomédicas no [PubMed](https://pubmed.ncbi.nlm.nih.gov/). O conjunto de dados está em [formato JSON Lines](https://jsonlines.org) e é compactado usando a biblioteca `zstandard`, então primeiro precisamos instalá-lo: ```py !pip install zstandard ``` Em seguida, podemos carregar o conjunto de dados usando o método para arquivos remotos que aprendemos na [seção 2](/course/chapter5/2): ```py from datasets import load_dataset # This takes a few minutes to run, so go grab a tea or coffee while you wait :) data_files = "https://the-eye.eu/public/AI/pile_preliminary_components/PUBMED_title_abstracts_2019_baseline.jsonl.zst" pubmed_dataset = load_dataset("json", data_files=data_files, split="train") pubmed_dataset ``` ```python out Dataset({ features: ['meta', 'text'], num_rows: 15518009 }) ``` Podemos ver que há 15.518.009 linhas e 2 colunas em nosso conjunto de dados - isso é muito! <Tip> ✎ Por padrão, 🤗 Datasets descompactará os arquivos necessários para carregar um dataset. Se você quiser preservar espaço no disco rígido, você pode passar `DownloadConfig(delete_extracted=True)` para o argumento `download_config` de `load_dataset()`. Consulte a [documentação](https://huggingface.co/docs/datasets/package_reference/builder_classes#datasets.DownloadConfig) para obter mais detalhes. 
</Tip>

Vamos inspecionar o conteúdo do primeiro exemplo:

```py
pubmed_dataset[0]
```

```python out
{'meta': {'pmid': 11409574, 'language': 'eng'},
 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'}
```

Ok, isso parece o resumo de um artigo médico. Agora vamos ver quanta RAM usamos para carregar o conjunto de dados!

## A magia do mapeamento de memória

Uma maneira simples de medir o uso de memória em Python é com a biblioteca [`psutil`](https://psutil.readthedocs.io/en/latest/), que pode ser instalada com `pip` da seguinte forma:

```python
!pip install psutil
```

Ele fornece uma classe `Process` que nos permite verificar o uso de memória do processo atual da seguinte forma:

```py
import psutil

# Process.memory_info is expressed in bytes, so convert to megabytes
print(f"RAM used: {psutil.Process().memory_info().rss / (1024 * 1024):.2f} MB")
```

```python out
RAM used: 5678.33 MB
```

Aqui o atributo `rss` refere-se ao _tamanho do conjunto residente_, que é a fração de memória que um processo ocupa na RAM. Essa medida também inclui a memória usada pelo interpretador Python e as bibliotecas que carregamos, portanto, a quantidade real de memória usada para carregar o conjunto de dados é um pouco menor. Para comparação, vamos ver o tamanho do conjunto de dados no disco, usando o atributo `dataset_size`. Como o resultado é expresso em bytes como antes, precisamos convertê-lo manualmente para gigabytes:

```py
print(f"Number of files in dataset : {pubmed_dataset.dataset_size}")
size_gb = pubmed_dataset.dataset_size / (1024**3)
print(f"Dataset size (cache file) : {size_gb:.2f} GB")
```

```python out
Number of files in dataset : 20979437051
Dataset size (cache file) : 19.54 GB
```

Legal -- apesar de ter quase 20 GB de tamanho, podemos carregar e acessar o conjunto de dados com muito menos RAM!

<Tip>

✏️ **Experimente!** Escolha um dos [subconjuntos](https://the-eye.eu/public/AI/pile_preliminary_components/) da `The Pile` que é maior que a RAM do seu laptop ou desktop, carregue com 🤗 Datasets e meça a quantidade de RAM usada. Observe que, para obter uma medição precisa, você desejará fazer isso em um novo processo. Você pode encontrar os tamanhos descompactados de cada subconjunto na Tabela 1 do [artigo do `The Pile`](https://arxiv.org/abs/2101.00027).

</Tip>

Se você estiver familiarizado com Pandas, esse resultado pode ser uma surpresa por causa da famosa [regra de ouro](https://wesmckinney.com/blog/apache-arrow-pandas-internals/) de Wes McKinney de que você normalmente precisa de 5 a 10 vezes mais RAM do que o tamanho do seu conjunto de dados. Então, como 🤗 Datasets resolve esse problema de gerenciamento de memória? O 🤗 Datasets trata cada conjunto de dados como um [arquivo mapeado em memória](https://en.wikipedia.org/wiki/Memory-mapped_file), que fornece um mapeamento entre RAM e armazenamento do sistema de arquivos que permite que a biblioteca acesse e opere em elementos do conjunto de dados sem precisar carregá-lo totalmente na memória.

Arquivos mapeados em memória também podem ser compartilhados em vários processos, o que permite que métodos como `Dataset.map()` sejam paralelizados sem a necessidade de mover ou copiar o conjunto de dados.
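Como um esboço simples para ver isso na prática, podemos listar os arquivos Arrow em cache que sustentam o conjunto de dados (o caminho exibido depende do seu cache local):

```py
# Lista os arquivos em cache que o 🤗 Datasets mapeia em memória
print(pubmed_dataset.cache_files)
```

São justamente esses arquivos que o sistema operacional mapeia em memória, em vez de carregar todo o conteúdo na RAM.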
Sob o capô, esses recursos são todos realizados pelo formato de memória [Apache Arrow](https://arrow.apache.org) e [`pyarrow`](https://arrow.apache.org/docs/python/index.html), que tornam o carregamento e o processamento de dados extremamente rápidos. (Para mais detalhes sobre o Apache Arrow e comparações com o Pandas, confira [post do blog de Dejan Simic](https://towardsdatascience.com/apache-arrow-read-dataframe-with-zero-memory-69634092b1a).) Para ver isso em ação, vamos executar um pequeno teste de velocidade iterando sobre todos os elementos no conjunto de dados PubMed Abstracts: ```py import timeit code_snippet = """batch_size = 1000 for idx in range(0, len(pubmed_dataset), batch_size): _ = pubmed_dataset[idx:idx + batch_size] """ time = timeit.timeit(stmt=code_snippet, number=1, globals=globals()) print( f"Iterated over {len(pubmed_dataset)} examples (about {size_gb:.1f} GB) in " f"{time:.1f}s, i.e. {size_gb/time:.3f} GB/s" ) ``` ```python out 'Iterated over 15518009 examples (about 19.5 GB) in 64.2s, i.e. 0.304 GB/s' ``` Aqui usamos o módulo `timeit` do Python para medir o tempo de execução do `code_snippet`. Normalmente, você poderá iterar em um conjunto de dados a uma velocidade de alguns décimos de GB/s a vários GB/s. Isso funciona muito bem para a grande maioria dos aplicativos, mas às vezes você terá que trabalhar com um conjunto de dados grande demais para ser armazenado no disco rígido do seu laptop. Por exemplo, se tentássemos baixar o Pile por completo, precisaríamos de 825 GB de espaço livre em disco! Para lidar com esses casos, 🤗 Datasets fornece um recurso de streaming que nos permite baixar e acessar elementos em tempo real, sem a necessidade de baixar todo o conjunto de dados. Vamos dar uma olhada em como isso funciona. <Tip> 💡 Nos notebooks Jupyter, você também pode cronometrar células usando a [`%%timeit` função mágica](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-timeit). </Tip> ## Conjuntos de dados em streaming Para habilitar o streaming do conjunto de dados você só precisa passar o argumento `streaming=True` para a função `load_dataset()`. Por exemplo, vamos carregar o conjunto de dados PubMed Abstracts novamente, mas em modo streaming: ```py pubmed_dataset_streamed = load_dataset( "json", data_files=data_files, split="train", streaming=True ) ``` Em vez do familiar `Dataset` que encontramos em outro lugar neste capítulo, o objeto retornado com `streaming=True` é um `IterableDataset`. Como o nome sugere, para acessar os elementos de um `IterableDataset` precisamos iterar sobre ele. Podemos acessar o primeiro elemento do nosso conjunto de dados transmitido da seguinte forma: ```py next(iter(pubmed_dataset_streamed)) ``` ```python out {'meta': {'pmid': 11409574, 'language': 'eng'}, 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection.\nTo determine the prevalence of hypoxaemia in children aged under 5 years suffering acute lower respiratory infections (ALRI), the risk factors for hypoxaemia in children under 5 years of age with ALRI, and the association of hypoxaemia with an increased risk of dying in children of the same age ...'} ``` Os elementos de um conjunto de dados transmitido podem ser processados dinamicamente usando `IterableDataset.map()`, o que é útil durante o treinamento se você precisar tokenizar as entradas. 
O processo é exatamente o mesmo que usamos para tokenizar nosso conjunto de dados no [Capítulo 3](/course/chapter3), com a única diferença de que as saídas são retornadas uma a uma: ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased") tokenized_dataset = pubmed_dataset_streamed.map(lambda x: tokenizer(x["text"])) next(iter(tokenized_dataset)) ``` ```python out {'input_ids': [101, 4958, 5178, 4328, 6779, ...], 'attention_mask': [1, 1, 1, 1, 1, ...]} ``` <Tip> 💡 Para acelerar a tokenização com streaming você pode passar `batched=True`, como vimos na última seção. Ele processará os exemplos lote por lote; o tamanho do lote padrão é 1.000 e pode ser especificado com o argumento `batch_size`. </Tip> Você também pode embaralhar um conjunto de dados transmitido usando `IterableDataset.shuffle()`, mas, diferentemente de `Dataset.shuffle()`, isso apenas embaralha os elementos em um `buffer_size` predefinido: ```py shuffled_dataset = pubmed_dataset_streamed.shuffle(buffer_size=10_000, seed=42) next(iter(shuffled_dataset)) ``` ```python out {'meta': {'pmid': 11410799, 'language': 'eng'}, 'text': 'Randomized study of dose or schedule modification of granulocyte colony-stimulating factor in platinum-based chemotherapy for elderly patients with lung cancer ...'} ``` Neste exemplo, selecionamos um exemplo aleatório dos primeiros 10.000 exemplos no buffer. Uma vez que um exemplo é acessado, seu lugar no buffer é preenchido com o próximo exemplo no corpus (ou seja, o 10.001º exemplo no caso acima). Você também pode selecionar elementos de um conjunto de dados transmitido usando as funções `IterableDataset.take()` e `IterableDataset.skip()`, que agem de maneira semelhante a `Dataset.select()`. Por exemplo, para selecionar os primeiros 5 exemplos no conjunto de dados PubMed Abstracts, podemos fazer o seguinte: ```py dataset_head = pubmed_dataset_streamed.take(5) list(dataset_head) ``` ```python out [{'meta': {'pmid': 11409574, 'language': 'eng'}, 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'}, {'meta': {'pmid': 11409575, 'language': 'eng'}, 'text': 'Clinical signs of hypoxaemia in children with acute lower respiratory infection: indicators of oxygen therapy ...'}, {'meta': {'pmid': 11409576, 'language': 'eng'}, 'text': "Hypoxaemia in children with severe pneumonia in Papua New Guinea ..."}, {'meta': {'pmid': 11409577, 'language': 'eng'}, 'text': 'Oxygen concentrators and cylinders ...'}, {'meta': {'pmid': 11409578, 'language': 'eng'}, 'text': 'Oxygen supply in rural africa: a personal experience ...'}] ``` Da mesma forma, você pode usar a função `IterableDataset.skip()` para criar divisões de treinamento e validação de um conjunto de dados embaralhado da seguinte forma: ```py # Skip the first 1,000 examples and include the rest in the training set train_dataset = shuffled_dataset.skip(1000) # Take the first 1,000 examples for the validation set validation_dataset = shuffled_dataset.take(1000) ``` Vamos completar nossa exploração de streaming de conjuntos de dados com um aplicativo comum: combinar vários conjuntos de dados para criar um único corpus. 🤗 Datasets fornece uma função `interleave_datasets()` que converte uma lista de objetos `IterableDataset` em um único `IterableDataset`, onde os elementos do novo conjunto de dados são obtidos alternando entre os exemplos de origem. 
Essa função é especialmente útil quando você está tentando combinar grandes conjuntos de dados, então, como exemplo, vamos transmitir o subconjunto FreeLaw do Pile, que é um conjunto de dados de 51 GB de pareceres jurídicos dos tribunais dos EUA: ```py law_dataset_streamed = load_dataset( "json", data_files="https://the-eye.eu/public/AI/pile_preliminary_components/FreeLaw_Opinions.jsonl.zst", split="train", streaming=True, ) next(iter(law_dataset_streamed)) ``` ```python out {'meta': {'case_ID': '110921.json', 'case_jurisdiction': 'scotus.tar.gz', 'date_created': '2010-04-28T17:12:49Z'}, 'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'} ``` Esse conjunto de dados é grande o suficiente para sobrecarregar a RAM da maioria dos laptops, mas conseguimos carregá-lo e acessá-lo sem suar a camisa! Vamos agora combinar os exemplos dos conjuntos de dados FreeLaw e PubMed Abstracts com a função `interleave_datasets()`: ```py from itertools import islice from datasets import interleave_datasets combined_dataset = interleave_datasets([pubmed_dataset_streamed, law_dataset_streamed]) list(islice(combined_dataset, 2)) ``` ```python out [{'meta': {'pmid': 11409574, 'language': 'eng'}, 'text': 'Epidemiology of hypoxaemia in children with acute lower respiratory infection ...'}, {'meta': {'case_ID': '110921.json', 'case_jurisdiction': 'scotus.tar.gz', 'date_created': '2010-04-28T17:12:49Z'}, 'text': '\n461 U.S. 238 (1983)\nOLIM ET AL.\nv.\nWAKINEKONA\nNo. 81-1581.\nSupreme Court of United States.\nArgued January 19, 1983.\nDecided April 26, 1983.\nCERTIORARI TO THE UNITED STATES COURT OF APPEALS FOR THE NINTH CIRCUIT\n*239 Michael A. Lilly, First Deputy Attorney General of Hawaii, argued the cause for petitioners. With him on the brief was James H. Dannenberg, Deputy Attorney General...'}] ``` Aqui usamos a função `islice()` do módulo `itertools` do Python para selecionar os dois primeiros exemplos do conjunto de dados combinado e podemos ver que eles correspondem aos primeiros exemplos de cada um dos dois conjuntos de dados de origem. Por fim, se você quiser transmitir o Pile em sua totalidade de 825 GB, poderá pegar todos os arquivos preparados da seguinte maneira: ```py base_url = "https://the-eye.eu/public/AI/pile/" data_files = { "train": [base_url + "train/" + f"{idx:02d}.jsonl.zst" for idx in range(30)], "validation": base_url + "val.jsonl.zst", "test": base_url + "test.jsonl.zst", } pile_dataset = load_dataset("json", data_files=data_files, streaming=True) next(iter(pile_dataset["train"])) ``` ```python out {'meta': {'pile_set_name': 'Pile-CC'}, 'text': 'It is done, and submitted. You can play “Survival of the Tastiest” on Android, and on the web...'} ``` <Tip> ✏️ **Experimente!** Use um dos grandes corpora Common Crawl como [`mc4`](https://huggingface.co/datasets/mc4) ou [`oscar`](https://huggingface.co/datasets/oscar) para criar um conjunto de dados multilíngue de streaming que represente as proporções faladas de idiomas em um país de sua escolha. 
Por exemplo, as quatro línguas nacionais na Suíça são alemão, francês, italiano e romanche, então você pode tentar criar um corpus suíço amostrando os subconjuntos do Oscar de acordo com sua proporção falada.

</Tip>

Agora você tem todas as ferramentas necessárias para carregar e processar conjuntos de dados de todas as formas e tamanhos, mas, a menos que tenha muita sorte, chegará um ponto em sua jornada de PNL em que você terá que criar um conjunto de dados para resolver o problema em mãos. Esse é o tema da próxima seção!
course/chapters/pt/chapter5/4.mdx/0
{ "file_path": "course/chapters/pt/chapter5/4.mdx", "repo_id": "course", "token_count": 6510 }
129
# Введение <CourseFloatingBanner chapter={1} classNames="absolute z-10 right-0 top-0" /> ## Добро пожаловать на 🤗 курс! <Youtube id="00GKzGyWFEs" /> В этом курсе вы научитесь основам обработки естественного языка (NLP) с использованием библиотек от [Hugging Face](https://huggingface.co/). Экосистема состоит из: моделей ([🤗 Transformers](https://github.com/huggingface/transformers)), датасетов ([🤗 Datasets](https://github.com/huggingface/datasets)), вспомогательных библиотек ([🤗 Accelerate](https://github.com/huggingface/accelerate), [🤗 Tokenizers](https://github.com/huggingface/tokenizers)), а также репозитория [Hugging Face Hub](https://huggingface.co/models). Это полностью бесплатно! ## Чего ожидать от курса? Краткое описание курса: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary.svg" alt="Brief overview of the chapters of the course."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/summary-dark.svg" alt="Brief overview of the chapters of the course."> </div> - Главы 1-4 содержат в себе введение в главные концепции библиотеки 🤗 Transformers. К концу этой части курса вы будете знакомы с тем, как функционируют трансформеры, как применять модели из репозитория [Hugging Face Hub](https://huggingface.co/models), как дообучить модели на собственных данных и опубликовать результаты на Hugging Face Hub! - Главы 5-8 научат вас основам библиотек 🤗 Datasets и 🤗 Tokenizers (датасеты и токенизаторы); это необходимо для дальнейшего погружения в область обработки естественного языка. К концу этой части вы научитесь решать наиболее распространенные задачи в  NLP самостоятельно! - Главы 9-12 выходят за рамки NLP, в них описано, как можно применять трансформеры в задачах обработки речи и компьютерном зрении. Также вы узнаете, как создавать и демонстрировать свои модели, оптимизировать их для промышленного использования. После изучения этой части вы будете в силах применить 🤗 Transformers к (почти) любой задаче машинного обучения! Этот курс: * Требует хорошего знания Python * Будет лучше усвоен после ознакомления с курсом по глубокому обучению, например: [fast.ai's](https://www.fast.ai/) [Practical Deep Learning for Coders](https://course.fast.ai/) или одной из программ, подготовленных [DeepLearning.AI](https://www.deeplearning.ai/) * Не предполагает предварительных знаний библиотек: [PyTorch](https://pytorch.org/) или [TensorFlow](https://www.tensorflow.org/), однако знакомство с ними поможет вам в дальнейшем После прохождения текущего курса мы рекомендуем ознакомиться со специализацией от DeepLearning.AI: [Natural Language Processing Specialization](https://www.coursera.org/specializations/natural-language-processing?utm_source=deeplearning-ai&utm_medium=institutions&utm_campaign=20211011-nlp-2-hugging_face-page-nlp-refresh), которая покрывает широкий спектр традиционных моделей NLP: от наивного Байеса до LSTM-сетей! ## Кто мы? Об авторах: [**Abubakar Abid**](https://huggingface.co/abidlabs) окончил PhD в области прикладного машинного обучения в Стэндфордском университете. Во время PhD, он основал [Gradio](https://github.com/gradio-app/gradio) - свободная библиотека для Python, с помощью которой увидели свет свыше 600000 тысяч демоверсий моделей машинного обучения. Hugging Face приобрел Gradio, и теперь Abubakar работает с нами в качестве руководителя разработки машинного обучения. 
[**Matthew Carrigan**](https://huggingface.co/Rocketknight1) - ML-инженер в Hugging Face. Живет в Дублине, Ирландия, и ранее работал инженером по машинному обучению в Parse.ly, а до этого — научным сотрудником в Тринити-колледже в Дублине. Он не верит, что мы сможем реализовать теорию сильного искусственного интеллекта за счет масштабирования существующих архитектур, но все равно возлагает большие надежды на бессмертие роботов.

[**Lysandre Debut**](https://huggingface.co/lysandre) - ML-инженер в Hugging Face, работает над библиотекой 🤗 Transformers с самых ранних этапов разработки. Его цель — сделать NLP доступным для всех, разработав инструменты с очень простым API.

[**Sylvain Gugger**](https://huggingface.co/sgugger) – инженер-исследователь в Hugging Face и один из ключевых участников разработки библиотеки 🤗 Transformers. Ранее работал научным сотрудником в fast.ai и написал книгу _[Deep Learning for Coders with fastai and PyTorch](https://learning.oreilly.com/library/view/deep-learning-for/9781492045519/)_ в соавторстве с Jeremy Howard. Основное внимание в его исследованиях уделяется тому, чтобы сделать глубокое обучение более доступным путем разработки и улучшения методов, позволяющих моделям быстро обучаться при ограниченных ресурсах.

[**Dawood Khan**](https://huggingface.co/dawoodkhan82) - ML-инженер в Hugging Face. Dawood из Нью-Йорка, где он окончил Нью-Йоркский университет и получил степень бакалавра компьютерных наук. Проработав несколько лет iOS инженером, Dawood решил сменить работу и стал сооснователем Gradio. Позднее Hugging Face приобрел Gradio.

[**Merve Noyan**](https://huggingface.co/merve) - developer advocate в Hugging Face, работает над разработкой инструментов и созданием контента на их основе, чтобы сделать машинное обучение более доступным.

[**Lucile Saulnier**](https://huggingface.co/SaulLu) - ML-инженер в Hugging Face, разрабатывающая и поддерживающая использование инструментов с открытым исходным кодом. Она также активно участвует во многих исследовательских проектах в области NLP, таких как совместное обучение и BigScience.

[**Lewis Tunstall**](https://huggingface.co/lewtun) - ML-инженер в Hugging Face, сосредоточен на разработке инструментов с открытым исходным кодом и обеспечении их доступности для более широкого сообщества. Соавтор будущей книги [O’Reilly book on Transformers](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/).

[**Leandro von Werra**](https://huggingface.co/lvwerra) - ML-инженер в команде, работающей над открытым исходным кодом Hugging Face, и соавтор будущей книги [O’Reilly book on Transformers](https://www.oreilly.com/library/view/natural-language-processing/9781098136789/). Обладает большим опытом реализации NLP-проектов в промышленности.

## ЧАВО

Мы собрали ответы на несколько часто задаваемых вопросов:

- **Получу ли я сертификат после прохождения этого курса?**
На данный момент у нас нет сертификации для этого курса. Мы работаем над получением сертификации для экосистемы Hugging Face. Следите за новостями!

- **Сколько времени мне нужно будет потратить на прохождение этого курса?**
Каждая глава этого курса рассчитана на неделю работы, то есть примерно 6-8 часов в неделю. Однако вы можете проходить курс в любом удобном для вас ритме.
- **Где я могу задать вопрос по материалам курса?** Если у вас возникли какие-либо вопросы по поводу любой части курса, просто нажмите на "*Ask a question*" наверху страницы, и вы будете автоматически перенаправлены в соответствующий раздел [форума Hugging Face](https://discuss.huggingface.co/) (форум на английском языке): <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/forum-button.png" alt="Link to the Hugging Face forums" width="75%"> Обратите внимание, что на форуме также доступен список [идей для проектов](https://discuss.huggingface.co/c/course/course-event/25), если вы хотите применить полученные знания на практике после прохождения курса. - **Где я могу посмотреть на код, используемый в этом курсе?** Внутри каждого раздела наверху страницы есть баннер, который позволит запустить код в Google Colab или Amazon SageMaker Studio Lab: <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter1/notebook-buttons.png" alt="Link to the Hugging Face course notebooks" width="75%"> Блокноты Jupyter со всем кодом, используемом в материалах курса, доступны в репозитории [`huggingface/notebooks`](https://github.com/huggingface/notebooks). Если вы хотите сгенерировать их на своем компьютере, вы можете найти инструкцию в репозитории [`course`](https://github.com/huggingface/course#-jupyter-notebooks) на GitHub. - **Как я могу внести свой вклад в развитие курса?** Существует множество способов внести свой вклад в наш курс! Если вы найдете опечатку или баг, пожалуйста, откройте вопрос (issue) в репозитории [`course`](https://github.com/huggingface/course). Если вы хотите помочь с переводом на ваш родной язык, вы можете найти инструкцию [здесь](https://github.com/huggingface/course#translating-the-course-into-your-language). - **Какие стандарты использовались при переводе?** Каждый перевод содержит глоссарий и файл `TRANSLATING.txt`, в которых описаны стандарты, используемые для перевода терминов и т.д. Вы можете посмотреть на пример для немецкого языка [здесь](https://github.com/huggingface/course/blob/main/chapters/de/TRANSLATING.txt). - **Могу ли я использовать этот курс в своих целях?** Конечно! Этот курс распространяется по либеральной лицензии [Apache 2 license](https://www.apache.org/licenses/LICENSE-2.0.html). Это означает, что вы должны упомянуть создателей этого курса, предоставить ссылку на лицензию и обозначить все изменения. Все это может быть сделано любым приемлемым способов, который, однако, не подразумевает, что правообладатель поддерживает вас или ваши действия по отношению этого курса. Если вы хотите процитировать этот курс, пожалуйста, используйте следующий BibTex: ``` @misc{huggingfacecourse, author = {Hugging Face}, title = {The Hugging Face Course, 2022}, howpublished = "\url{https://huggingface.co/course}", year = {2022}, note = "[Online; accessed <today>]" } ``` ## Поехали! Вы готовы начать? В этой главе вы узнаете: * Как использовать `pipeline()` для решения NLP-задач генерации и классификации текста * Об архитектуре трансформеров * Как различать архитектуры кодировщика, декодировщика и кодировщика-декодировщика и варианты их использования
course/chapters/ru/chapter1/1.mdx/0
{ "file_path": "course/chapters/ru/chapter1/1.mdx", "repo_id": "course", "token_count": 7983 }
130
# Базовое использование завершено![[basic-usage-completed]] <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" /> Отличная работа по изучению курса до этого места! Напомним, что в этой главе вы: - Узнали об основных составляющих блоках модели Transformer. - Узнали, из чего состоит конвейер токенизации. - Узнали, как использовать модель Transformer на практике. - Узнали, как использовать токенизатор для преобразования текста в тензоры, понятные модели. - Настроили токенизатор и модель вместе, чтобы перейти от текста к прогнозам. - Узнали об ограничениях входных идентификаторов и познакомились с масками внимания. - Поиграли с универсальными и настраиваемыми методами токенизатора. С этого момента вы должны свободно ориентироваться в документации 🤗 Transformers: лексикон будет звучать знакомо, и вы уже познакомились с методами, которые будете использовать чаще всего.
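В качестве короткого напоминания, вот минимальный набросок, объединяющий перечисленные шаги — токенизацию и прогон текста через модель (чекпоинт взят лишь для примера, предполагается, что установлен PyTorch):

```py
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

# Токенизатор превращает текст в тензоры, понятные модели
inputs = tokenizer(["I love this course!"], padding=True, truncation=True, return_tensors="pt")

# Прогоняем входы через модель и превращаем логиты в вероятности
outputs = model(**inputs)
print(torch.nn.functional.softmax(outputs.logits, dim=-1))
```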
course/chapters/ru/chapter2/7.mdx/0
{ "file_path": "course/chapters/ru/chapter2/7.mdx", "repo_id": "course", "token_count": 874 }
131
# Что делать, если моего датасета на нет на Hub? <DocNotebookDropdown classNames="absolute z-10 right-0 top-0" options={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter5/section2.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter5/section2.ipynb"}, ]} /> Вы знаете, как использовать [Hugging Face Hub](https://huggingface.co/datasets) для скачивания датасетов, но часто складывается ситуация, когда нужные данные не хранятся у вас локально или на удаленном сервере. В этом разделе мы посмотрим, как библиотека 🤗 Datasets может быть использована для загрузки датасетов, которые не хранятся на Hugging Face Hub. <Youtube id="HyQgpJTkRdE"/> ## Работа с локальными и удаленными датасетами 🤗 Datasets предоставляет скрипты для загрузки собственных датасетов. Библиотека поддерживает несколько распространенных форматов: | Data format | Loading script | Example | | :----------------: | :------------: | :-----------------------------------------------------: | | CSV & TSV | `csv` | `load_dataset("csv", data_files="my_file.csv")` | | Text files | `text` | `load_dataset("text", data_files="my_file.txt")` | | JSON & JSON Lines | `json` | `load_dataset("json", data_files="my_file.jsonl")` | | Pickled DataFrames | `pandas` | `load_dataset("pandas", data_files="my_dataframe.pkl")` | Как показано в таблице, для каждого формата мы должны задать тип скрипта загрузки в функции `load_dataset()` вместе с аргументом `data_files`, который указывает путь к одному или нескольким файлам. Начнем с загрузки набора данных из локальных файлов; позже мы увидим, как сделать то же самое с файлами, расположены на удаленном сервере. ## Загрузка локального датасета Для этого примера мы будем использовать датасет [SQuAD-it dataset](https://github.com/crux82/squad-it/). Это большой датасет для задачи question answering на итальянском языке. Обучающая и тестовая часть расположены на GitHub, мы можем скачать файлы с помощью простой команды `wget`. ```python !wget https://github.com/crux82/squad-it/raw/master/SQuAD_it-train.json.gz !wget https://github.com/crux82/squad-it/raw/master/SQuAD_it-test.json.gz ``` Выполнение этих команд запустит процесс скачивания файлов *SQuAD_it-train.json.gz* и *SQuAD_it-test.json.gz*, которые мы можем распаковать с помощью Linux команды `gzip`: ```python !gzip -dkv SQuAD_it-*.json.gz ``` ```bash SQuAD_it-test.json.gz: 87.4% -- replaced with SQuAD_it-test.json SQuAD_it-train.json.gz: 82.2% -- replaced with SQuAD_it-train.json ``` После выполнения команд мы увидим, что архивы будут заменены файлами _SQuAD_it-train.json_ и _SQuAD_it-text.json_ в формате JSON. <Tip> ✎ Причина, по которой в примере выше перед командами расположен `!` заключается в том, что мы выполняем их в Jupyter notebook. Если вы хотите запустить эти команды в терминале – просто удалите `!`. </Tip> Для загрузки JSON файла с помощью функции `load_dataset()` необходимо знать, с каким типом JSON-файла мы имеем дело: обычный JSON (похожий на вложенный словарь) или JSON, сформированный построчно. Как и многие датасеты для задач question-answering, SQuAD-it использует формат обычного JSON'а с текстом, хранящимся в поле `data`. 
Это означает, что мы можем подгрузить датасет, задав аргумент `field` следующим образом: ```py from datasets import load_dataset squad_it_dataset = load_dataset("json", data_files="SQuAD_it-train.json", field="data") ``` По умолчанию при загрузке локальных файлов создается объект `DatasetDict` с меткой `train`. Мы можем изучить объект `squad_it_dataset`: ```py squad_it_dataset ``` ```python out DatasetDict({ train: Dataset({ features: ['title', 'paragraphs'], num_rows: 442 }) }) ``` Выше распечатана информация об объекте: число строк и колонки обучающего датасета. Мы можем посмотреть на один объект, проиндексировав его как `train` следующим образом: ```py squad_it_dataset["train"][0] ``` ```python out { "title": "Terremoto del Sichuan del 2008", "paragraphs": [ { "context": "Il terremoto del Sichuan del 2008 o il terremoto...", "qas": [ { "answers": [{"answer_start": 29, "text": "2008"}], "id": "56cdca7862d2951400fa6826", "question": "In quale anno si è verificato il terremoto nel Sichuan?", }, ... ], }, ... ], } ``` Отлично! Мы загрузили наш первый датасет! Но пока мы это сделали только для обучающей части данных, хотя нам нужны и `train`, и `test` в одном `DatasetDict`, чтобы мы могли применить функцию `Dataset.map()` на оба подмножества сразу. Чтобы сделать это, мы можем передать в словарь в `data_files`. Сделать это можно так: ```py data_files = {"train": "SQuAD_it-train.json", "test": "SQuAD_it-test.json"} squad_it_dataset = load_dataset("json", data_files=data_files, field="data") squad_it_dataset ``` ```python out DatasetDict({ train: Dataset({ features: ['title', 'paragraphs'], num_rows: 442 }) test: Dataset({ features: ['title', 'paragraphs'], num_rows: 48 }) }) ``` Это ровно то, чего мы хотели добиться! Далее мы можем применять различные приемы для препроцессинга данных: очистку, токенизацию и прочее. <Tip> Аргумент `data_files` функции `load_dataset()` очень гибкий и может являться путем к файлу, списком путей файлов или словарем, в котором указаны названия сплитов (обучающего и тестового) и пути к соответствующим файлам. Вы также можете найти все подходящие файлы в директории с использованием маски по правилам Unix-консоли (т.е. указать путь к директории и указать `data_files="*.json"` для конкретного сплита). Более подробно это изложено в [документации](https://huggingface.co/docs/datasets/loading#local-and-remote-files) 🤗 Datasets. </Tip> Скрипты загрузки 🤗 Datasets также поддерживают автоматическую распаковку входных файлов, поэтому мы можем пропустить команду `gzip` просто передав в аргумент `data_files` пути к архивам: ```py data_files = {"train": "SQuAD_it-train.json.gz", "test": "SQuAD_it-test.json.gz"} squad_it_dataset = load_dataset("json", data_files=data_files, field="data") ``` Это может быть полезно, если вы не хотите вручную разархивировать GZIP файлы. Автоматическое разархивирование также поддерживает распространенные форматы вроде ZIP и TAR, так что вы можете передавать и пути к таким файлам. Теперь, когда вы знаете, как загрузить локально хранящиеся файлы, мы посмотрим, как подгрузить данные с удаленных серверов. ## Загрузка файлов с удаленного сервера Если вы работаете data scientist или программистом в компании, скорее всего ваши данные хранятся на сервере. К счастью, загрузка файлов с удаленных машин настолько же простая, насколько и загрузка их со локальной машины! Вместо пути к локальным файлам мы передаем аргументу `data_files` один или несколько URL, указывающих на нужные файлы. 
К примеру, датасет SQuAD-it расположен на GitHub, мы можем просто указать ссылку на файлы следующим образом: ```py url = "https://github.com/crux82/squad-it/raw/master/" data_files = { "train": url + "SQuAD_it-train.json.gz", "test": url + "SQuAD_it-test.json.gz", } squad_it_dataset = load_dataset("json", data_files=data_files, field="data") ``` Эта операция вернет такой же `DatasetDict`, какой мы получали ранее, но избавит нас от загрузки и разархивирования файлов _SQuAD_it-*.json.gz_ вручную. На этом мы завершаем наш обзор различных способов загрузки датасетов, которые не размещены на Hugging Face Hub. Теперь, когда у нас есть датасет, с которым можно поиграться, давайте погрузимся в различные методы обработки данных! <Tip> ✏️ **Попробуйте!** Выберите другой датасет, расположенный на GitHub или в архиве [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/index.php) и попробуйте загрузить его с локальной машины и с удаленного сервера. В качестве бонуса попробуйте загрузить датасет в формате CSV или обычного тектового файла (см. детали по поддерживаемым форматам в [документации](https://huggingface.co/docs/datasets/loading#local-and-remote-files)). </Tip>
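Например, минимальный набросок загрузки CSV-файла с удалённого сервера может выглядеть так (URL здесь условный — подставьте ссылку на ваш файл):

```py
from datasets import load_dataset

# URL ниже приведён лишь для иллюстрации
data_files = {"train": "https://example.com/path/to/my_dataset.csv"}
csv_dataset = load_dataset("csv", data_files=data_files)
```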
course/chapters/ru/chapter5/2.mdx/0
{ "file_path": "course/chapters/ru/chapter5/2.mdx", "repo_id": "course", "token_count": 6631 }
132
# Токенизаторы, проверка![[tokenizers-check]] <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" /> Отличная работа по завершению этой главы! После этого глубокого погружения в токенизаторы вы должны: - Уметь обучать новый токенизатор, используя старый в качестве шаблона. - Понимать, как использовать смещения для сопоставления позиций токенов с их исходным положением в тексте - Знать различия между BPE, WordPiece и Unigram. - Уметь комбинировать блоки, предоставляемые библиотекой 🤗 Tokenizers, для создания собственного токенизатора - Уметь использовать собственный токенизатор в библиотеке 🤗 Transformers
course/chapters/ru/chapter6/9.mdx/0
{ "file_path": "course/chapters/ru/chapter6/9.mdx", "repo_id": "course", "token_count": 590 }
133
# Часть 2 завершена![[part-2-completed]] <CourseFloatingBanner chapter={8} classNames="absolute z-10 right-0 top-0" /> Поздравляем, вы прошли вторую часть курса! Мы активно работаем над третьей частью, поэтому подпишитесь на нашу [рассылку](https://huggingface.curated.co/), чтобы не пропустить ее выход. Теперь вы должны уметь решать различные задачи NLP, дообучать или обучать модели с нуля. Не забудьте поделиться своими результатами с сообществом на [Model Hub](https://huggingface.co/models). Нам не терпится увидеть, что вы построите на основе полученных знаний!
course/chapters/ru/chapter8/6.mdx/0
{ "file_path": "course/chapters/ru/chapter8/6.mdx", "repo_id": "course", "token_count": 499 }
134
# บทนำ ยินดีต้อนรับเข้าสู่คอร์ส Hugging Face! ในส่วนเริ่มต้นนี้จะพาคุณติดตั้งโปรแกรมหรือสิ่งแวดล้อมในการทำงาน(หรือเรียกว่า working environment) หากคุณเพิ่งเริ่มคอร์สนี้ เราแนะนำให้คุณไปดู [บทที่ 1](/course/chapter1) ก่อน จากนั้นค่อยกลับมาติดตั้ง environtment เพื่อทดลองโค้ดด้วยตัวเอง คลังชุดคำสั่ง (หรือเรียกว่า library) ต่าง ๆ ที่ใช้ในคอร์สนี้สามารถใช้เป็นชุดคำสั่งสำเร็จในภาษา Python (หรือเรียกว่า Python package) ได้ทั้งหมด ดังนั้น ในส่วนนี้เราจะแสดงวิธีการติดตั้ง Python environment และลง library ที่จำเป็นทั้งหมด ในส่วนนี้เราจะแสดงวิธีการติดตั้ง working environment อยู่ทั้งหมดสองวิธี ได้แก่ การใช้ Colab notebook และ การใช้สิ่งแวดล้อมจำลองใน Python (หรือเรียกว่า Python virtual environment) คุณสามารถใช้วิธีใดก็ได้ที่ถนัด สำหรับผู้เริ่มต้นแล้ว เราแนะนำให้ใช้ Colab notebook อย่างไรก็ตาม ในที่นี้จะไม่ได้แสดงวิธีการติดตั้งสำหรับระบบปฏิบัติการ Windows หากคุณใช้ Windows แนะนำให้ทำตามวิธีการลงด้วย Colab notebook หากคุณใช้ Linux หรือ macOS คุณสามารถทำตามวิธีใดก็ได้ หากคุณจะทำตามตัวอย่างในคอร์สนี้ คุณจำเป็นต้องมีบัญชี Hugging Face หากยังไม่มีบัญชี คุณสามารถ [สร้างบัญชีใหม่ที่นี่](https://huggingface.co/join) ## การใช้งาน Colab notebook (หรือเรียกว่า Google Colab) การติดตั้งโดยใช้ Colab notebook เป็นวิธีการติดตั้งที่ง่ายที่สุด เพียงแค่คุณเปิดเว็บบราวเซอร์ เช่น Google Chrome, Microsoft Edge, Mozilla Firefox, Brave, หรือ Safari ก็สามารถใช้งานได้เลย หากคุณยังไม่เคยใช้ Colab เราแนะนำให้คุณเริ่มต้นจาก [แนะนำการใช้งาน Colab](https://colab.research.google.com/notebooks/intro.ipynb) โดย Colab อนุญาตให้คุณใช้ฮาร์ดแวร์เพิ่มความเร็ว เช่น GPU หรือ TPU ได้ฟรีสำหรับงานเล็ก ๆ เมื่อคุณคุ้นเคยกับการใช้งาน Colab แล้ว มาสร้าง notebook ใหม่กัน ผลจะออกมาหน้าตาเหมือนด้านล่าง: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/new_colab.png" alt="An empty colab notebook" width="80%"/> </div> ขั้นตอนต่อไป คือ การติดตั้ง library ที่ต้องใช้งานในคอร์สนี้ โดยเราจะใช้คำสั่ง `pip` ซึ่งเป็นตัวจัดการ package ใน Python เพื่อใช้ในการติดตั้ง โดยใน notebook นั้น คุณสามารถใช้งานคำสั่งระบบได้ด้วยการใส่ตัวอักษร `!` ด้านหน้าคำสั่ง ดังนั้นคุณสามารถติดตั้ง library 🤗 Transformers ด้วยคำสั่งดังต่อไปนี้: ``` !pip install transformers ``` คุณสามารถตรวจสอบว่า คุณติดตั้ง package เรียบร้อยแล้วหรือไม่ด้วยการ import เข้าไปใน Python ด้วยคำสั่งดังต่อไปนี้: ``` import transformers ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter0/install.gif" alt="A gif showing the result of the two commands above: installation and import" width="80%"/> </div> ขั้นตอนนี้เป็นการติดตั้ง 🤗 Transformers แบบเวอร์ชั่นเล็กไปก่อน แต่ยังไม่ได้ติดตั้ง framework ใด ๆ ที่เกี่ยวข้องกับ machine learning เช่น PyTorch หรือ TensorFlow เนื่องจากเราต้องใช้ feature ต่าง ๆ ของ library เราจึงแนะนำให้ติดตั้งเวอร์ชันพัฒนาซึ่งมาพร้อมกับ library อื่น ๆ ที่จำเป็น(หรือเรียกว่า dependency) เพื่อให้ครอบคลุมการใช้งานทั้งหมด ด้วยคำสั่งดังต่อไปนี้ ``` !pip install transformers[sentencepiece] ``` ขั้นตอนนี้จะใช้เวลานานนิดหน่อย แต่เมื่อจบแล้วก็ถือเป็นอันเสร็จสิ้น คุณพร้อมจะเริ่มใช้งานโค้ดต่าง ๆ ในคอร์สนี้ได้แล้ว! 
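ตัวอย่างสั้น ๆ ด้านล่างนี้ (เป็นเพียงตัวอย่างประกอบ ไม่ใช่ขั้นตอนบังคับ) ใช้สำหรับทดสอบว่าทุกอย่างพร้อมใช้งานจริง โดยใน Colab นั้นมี PyTorch ติดตั้งมาให้อยู่แล้ว:

```py
from transformers import pipeline

# ทดสอบการติดตั้งด้วย pipeline วิเคราะห์ความรู้สึก (sentiment analysis) แบบง่าย ๆ
classifier = pipeline("sentiment-analysis")
print(classifier("I love using Hugging Face libraries!"))
```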
## การใช้งานสิ่งแวดล้อมจำลองใน Python (หรือเรียกว่า Python virtual environment) หากคุณถนัดการใช้งาน Python virtual environment มากกว่า ขั้นตอนแรกที่ต้องทำคือการติดตั้ง Python ลงบนเครื่องของคุณ เราแนะนำให้ทำตาม [ขั้นตอนนี้](https://realpython.com/installing-python/) เมื่อคุณติดตั้ง Python เสร็จเรียบร้อย คุณสามารถรันคำสั่ง Python ที่ terminal ของคุณได้ โดยใช้คำสั่งดังต่อไปนี้: `python --version` หากคำสั่งนี้แสดงผลออกมาเป็น version ของ Python แสดงว่าระบบได้ติดตั้ง Python ลงบนเครื่องของคุณเรียบร้อยแล้วจริง ๆ เมื่อคุณใช้คำสั่ง Python ที่ terminal เช่น `python --version` ตัวโปรแกรมจะไปเรียก Python "ชุดหลัก"จากระบบของคุณ เราไม่แนะนำให้คุณติดตั้ง package ใด ๆ ลงบน Python "ชุดหลัก" ดังกล่าว แต่ให้ใช้ในการสร้าง environment แยกออกมาในแต่ละการใช้งาน ดังนั้น แต่ละงานจะมี package และ dependency ของตนเอง ทำให้คุณไม่ต้องกังวลกับปัญหาใช้งานไม่ได้เพราะเวอร์ชันไม่ตรงเนื่องจากเวอร์ชันของ library ที่งานหนึ่งไปขัดกับ library เดียวกันที่อีกงานหนึ่ง สำหรับ Python แล้ว กระบวนการนี้สามารถทำได้โดยใช้ [*virtual environment*](https://docs.python.org/3/tutorial/venv.html) ซึ่งเป็นการเก็บ directory ทั้งหมดในการติดตั้ง package ที่ต้องการในเวอร์ชันที่เราใช้งาน การสร้าง virtual environment สามารถทำได้หลายวิธี แต่เราจะใช้ package อย่างเป็นทางการจาก Python ชื่อว่า [`venv`](https://docs.python.org/3/library/venv.html#module-venv). ขั้นแรกให้สร้าง directory สำหรับการใช้งาน เช่น หากคุณต้องการสร้าง directory ใหม่ ชื่อว่า *transformer-course* ที่ root ของ home directory สามารถใช้คำสั่งดังต่อไปนี้: ``` mkdir ~/transformers-course cd ~/transformers-course ``` ภายใน directory นี้ ให้สร้าง virtual environment โดยใช้โมดูล `venv` ด้วยคำสั่งดังต่อไปนี้: ``` python -m venv .env ``` คุณจะพบ directory ชื่อ *.env* ใน directory ที่เคยว่างเปล่าของคุณ ด้วยคำสั่งดังต่อไปนี้: ``` ls -a ``` ```out . .. .env ``` คุณสามารถเข้าและออก virtual environment ได้ด้วยคำสั่ง `activate` และ `deactivate` ดังต่อไปนี้: ``` # Activate the virtual environment source .env/bin/activate # Deactivate the virtual environment source .env/bin/deactivate ``` คุณสามารถตรวจสอบได้ว่า คุณอยู่ใน environment ใดได้ด้วยคำสั่ง `which python` ระบบจะแสดงผล environment ที่คุณกำลังใช้งานอยู่ ``` which python ``` ```out /home/<user>/transformers-course/.env/bin/python ``` ### การติดตั้ง dependency ดังที่แสดงไว้ข้างต้นในส่วนของการใช้งาน Google Colab คุณจำเป็นต้องติดตั้ง package ต่าง ๆ เพื่อให้ใช้งานได้ โดยคุณสามารถติดตั้งเวอร์ชันพัฒนาของ 🤗 Transformers โดยใช้คำสั่ง `pip` ดังต่อไปนี้: ``` pip install "transformers[sentencepiece]" ``` จากนี้ก็เป็นอันเสร็จสิ้น คุณพร้อมสำหรับการเริ่มต้นแล้ว!
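คุณสามารถตรวจสอบได้ว่าการติดตั้งสำเร็จหรือไม่ ด้วยตัวอย่างสั้น ๆ ดังต่อไปนี้ (เป็นเพียงการตรวจสอบเบื้องต้นเท่านั้น):

```py
import transformers

# แสดงเวอร์ชันของ 🤗 Transformers ที่ติดตั้งไว้ใน virtual environment
print(transformers.__version__)
```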
course/chapters/th/chapter0/1.mdx/0
{ "file_path": "course/chapters/th/chapter0/1.mdx", "repo_id": "course", "token_count": 7019 }
135
<FrameworkSwitchCourse {fw} /> # ประกอบทุกอย่างเข้าด้วยกัน {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/th/chapter2/section6_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/th/chapter2/section6_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/th/chapter2/section6_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/th/chapter2/section6_tf.ipynb"}, ]} /> {/if} ในสองสาม sections ที่ผ่านมา เราได้พยายามทำทุกอย่างด้วยมือของเราเอง เราได้ลองศึกษาว่า tokenizer นั้นทำงานอย่างไรและวิธีการ tokenization, แปลงข้อมูลไปเป็น input IDs, การเติม(padding), การตัด(truncation), และ attention masks อย่างไรก็ตาม เหมือนที่เราเห็นใน section 2, 🤗 Transformers API นั้นสามารถจัดการกับสิ่งต่างๆเหล่านั้นให้เราได้ด้วย high-level ฟังก์ชันที่เราจะลงลึงในรายละเอียดกันในที่นี่ เมื่อคุณเรียกใช้งาน `tokenizer` ของคุณตรงๆกับประโยคหนึ่งๆ, คุณได้อินพุตที่พร้อมจะใส่เข้าไปยังโมเดลกลับมา: ```py from transformers import AutoTokenizer checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) sequence = "I've been waiting for a HuggingFace course my whole life." model_inputs = tokenizer(sequence) ``` ในที่นี้ ตัวแปร `model_inputs` นั้นประกอบด้วยทุกอย่างที่จำเป็นสำหรับโมเดลที่จะทำงานได้เป็นอย่างดี สำหรับ DistilBERT นั้นรวมไปถึง input IDs และ attention mask ด้วย ส่วนโมเดลอื่นๆที่รองรับอินพุตต่างๆเพิ่มเติมก็จะได้ผลลัพท์เหล่านั้นจาก `tokenizer` object ด้วย อย่างที่เราจะได้เห็นในบางตัวอย่างด้านล่างนี้ วิธีนี้เป็นวิธีที่ทรงพลังมาก อันดับแรก มันสามารถที่จะ tokenize ประโยคเพียงประโยคเดียวได้: ```py sequence = "I've been waiting for a HuggingFace course my whole life." 
model_inputs = tokenizer(sequence) ``` มันยังสามารถจัดการกับประโยคหลายๆประโยคได้ในคราวเดียวกัน โดยที่ไม่มีอะไรเปลี่ยนใน API เลย: ```py sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"] model_inputs = tokenizer(sequences) ``` มันสามารถที่จะเติม(padding) ให้สอดคล้องกับหลายๆวัตถุประสงค์: ```py # จะเติมประโยคไปจนถึงความยาวที่ยาวที่สุดของประโยค model_inputs = tokenizer(sequences, padding="longest") # จะเติมประโยคไปจนถึงความยาวที่ยาวที่สุดที่โมเดลรับได้ # (512 for BERT or DistilBERT) model_inputs = tokenizer(sequences, padding="max_length") # จะเติมประโยคไปจนถึงความยาวที่ยาวที่สุดที่ระบุไว้ model_inputs = tokenizer(sequences, padding="max_length", max_length=8) ``` มันสามารถตัดประโยคได้อีกด้วย: ```py sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"] # จะตัดประโยคที่มีความยาวเกินกว่าความยาวที่โมเดลรับได้ # (512 for BERT or DistilBERT) model_inputs = tokenizer(sequences, truncation=True) # จะตัดประโยคที่มีความยาวเกินกว่าความยาวที่ระบุไว้ model_inputs = tokenizer(sequences, max_length=8, truncation=True) ``` `tokenizer` object สามารถที่จะจัดการกับการแปลงข้อมูลไปเป็น tensors สำหรับ framework ที่เฉพาะเจาะจงได้ ซึ่งสามารถที่จะส่งเข้าโมเดลได้ทันที ยกตัวอย่างเช่น ในโค้ดตัวอย่างต่อไปนี้ เราจะสั่งให้ tokenizer ส่ง tensors จาก frameworks ต่างๆ กัน — `"pt"` ให้ PyTorch tensors, `"tf"` ให้ TensorFlow tensors, and `"np"` ให้ NumPy arrays: ```py sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"] # Returns PyTorch tensors model_inputs = tokenizer(sequences, padding=True, return_tensors="pt") # Returns TensorFlow tensors model_inputs = tokenizer(sequences, padding=True, return_tensors="tf") # Returns NumPy arrays model_inputs = tokenizer(sequences, padding=True, return_tensors="np") ``` ## tokens พิเศษ ถ้าเราดูที่ input IDs ที่ได้จาก tokenizer เราจะเห็นได้ว่ามันค่อนข้างแตกต่างไปจากสิ่งที่เราเคยได้ก่อนหน้านี้: ```py sequence = "I've been waiting for a HuggingFace course my whole life." model_inputs = tokenizer(sequence) print(model_inputs["input_ids"]) tokens = tokenizer.tokenize(sequence) ids = tokenizer.convert_tokens_to_ids(tokens) print(ids) ``` ```python out [101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102] [1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012] ``` มีหนึ่ง token ID ได้ถูกใส่เข้ามาด้านหน้าสุด และอีกหนึ่ง token ID ใส่ด้านหลังสุด มาถอดรหัสสองประโยคของ IDs ด้านบนดูว่ามันเกี่ยกับอะไร: ```py print(tokenizer.decode(model_inputs["input_ids"])) print(tokenizer.decode(ids)) ``` ```python out "[CLS] i've been waiting for a huggingface course my whole life. [SEP]" "i've been waiting for a huggingface course my whole life." 
``` tokenizer ทำการเพิ่มคำพิเศษ `[CLS]` ที่ด้านหน้าสุด และคำพิเศษ `[SEP]` ที่ด้านหลังสุด นั้นก็เพราะว่าโมเดลนั้นได้ผ่านการเทรนมาแบบนั้น ดังนั้นเพื่อให้ได้ผลลัพท์เดียวกันสำหรับการอนุมาน(inference) เราจำเป็นต้องเพิ่มมันเข้าไปเช่นเดียวกัน แต่ก็ต้องตระหนักว่าบางโมเดลนั้นไม่ได้เพิ่มคำพิเศษ หรือ ใส่คำที่ต่างออกไป; โมเดลอาจจะเพิ่มคำพิเศษเหล่านี้แค่เฉพาะด้านหน้าสุด หรือ ด้านหลังสุดเท่านั้น ไม่ว่าจะในกรณีใดๆ tokenizer รู้ว่าอันไหนเป็นอันที่ต้องการและมันจะจัดการให้คุณเอง: ## สรุป: จาก tokenizer ไปยังโมเดล ถึงตรงนี้เราได้เห็นขั้นตอนแต่ละอย่างทั้งหมดที่ `tokenizer` ใช้เพื่อประมวลผลข้อความ เรามาดูกันครั้งสุดท้ายว่ามันสามารถจัดการประโยคหลายๆประโยค (padding!), ประโยคยาวๆ, และ tensors หลายๆ ประเภทได้อย่างไรด้วย API หลักของมัน: {#if fw === 'pt'} ```py import torch from transformers import AutoTokenizer, AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = AutoModelForSequenceClassification.from_pretrained(checkpoint) sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"] tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="pt") output = model(**tokens) ``` {:else} ```py import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) sequences = ["I've been waiting for a HuggingFace course my whole life.", "So have I!"] tokens = tokenizer(sequences, padding=True, truncation=True, return_tensors="tf") output = model(**tokens) ``` {/if}
course/chapters/th/chapter2/6.mdx/0
{ "file_path": "course/chapters/th/chapter2/6.mdx", "repo_id": "course", "token_count": 5756 }
136
# บทนำ <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" /> ใน[บทที่ 3](/course/chapter3) คุณได้เรียนเกี่ยวกับการ fine-tune โมเดลเพื่อนำไปใช้ในงานที่คุณต้องการ ตอนนั้นเราใช้ตัวตัดคำ(tokenizer)แบบเดียวกับตัวที่มากับโมเดล แต่หากคุณอยากจะเทรนโมเดลตั้งแต่เริ่มต้นเลย คุณควรจะเลือกใช้ตัวตัดคำแบบไหนดี ในกรณีนี้ถ้าคุณใช้ตัวตัดคำที่เทรนจากคลังข้อมูล(corpus)ที่ไม่ใช่ภาษาเดียวกับโมเดลหรือคลังข้อมูลที่มาจากโดเมนอื่น(แปลว่าเนื้อหาของข้อมูลที่ใช้เทรนตัวตัดคำและใช้เทรนโมเดลมีความแตกต่างกันมาก)ก็จะไม่เหมาะสมนัก ตัวอย่างเช่น ตัวตัดคำที่เทรนมาสำหรับตัดคำภาษาอังกฤษ เมื่อนำมาใช้เพื่อตัดคำภาษาญี่ปุ่นก็จะได้ผลลัพธ์ที่ไม่ดี เพราะว่าทั้งสองภาษามีการใช้ช่องว่าง(space)และเครื่องหมายวรรคตอน(punctuation)ที่ต่างกันมาก ในบทนี้คุณจะได้เรียนเกี่ยวกับการเทรนตัวตัดคำจากคลังข้อความ(corpus of texts) เพื่อให้ได้ตัวตัดคำที่เหมาะสมกับ language model ที่คุณต้องการจะเทรน เราจะใช้ library ที่ชื่อว่า [🤗 Tokenizers](https://github.com/huggingface/tokenizers) ซึ่งมีตัวตัดคำแบบ "เร็ว" ให้ผู้ใช้เลือกได้ ใน [🤗 Transformers](https://github.com/huggingface/transformers) library เราจะมาดู features ต่างๆของ library นี้กันและมาเรียนรู้ว่าตัวตัดคำแบบเร็วและแบบช้านั้นต่างกันอย่างไร หัวข้อที่เราจะเรียนกันในบทนี้: * การสร้างตัวตัดคำขึ้นมาใหม่ให้คล้ายกับตัวที่ใช้ใน checkpoint โดนใช้ชุดข้อมูลใหม่ในการเทรน * feature พิเศษของตัวตัดคำแบบเร็ว * ความแตกต่างระหว่างอัลกอริทึม 3 แบบที่ใช้ในการสร้างตัวตัดคำประเภท subword ที่ใช้ใน NLP ทุกวันนี้ * การสร้างและเทรนตัวตัดคำตั้งแต่เริ่มต้นด้วย 🤗 Tokenizers library เทคนิคต่างๆที่คุณจะได้เรียนในบทนี้จะเป็นเตรียมให้คุณพร้อมสำหรับ[บทที่ 7](/course/chapter7/6) ซึ่งคุณจะได้เรียนเกี่ยวกับการสร้าง language model ด้วย Python เรามาเริ่มกันที่ความหมายของการ "เทรน" ตัวตัดคำ
course/chapters/th/chapter6/1.mdx/0
{ "file_path": "course/chapters/th/chapter6/1.mdx", "repo_id": "course", "token_count": 2611 }
137
# Decoder modelleri <CourseFloatingBanner chapter={1} classNames="absolute z-10 right-0 top-0" /> <Youtube id="d_ixlCubqQw" /> Decoder modeller, yalnızca bir Transformer modelinin decoderini kullanır. Her aşamada, attention katmanları sadece cümlede kendisinden önce gelen kelimelere erişebilir. Bu modeller *auto-regressive models* olarak isimlendirilir. Decoder modellerin ön eğitimi genellikle cümledeki bir sonraki kelimeyi tahmin etme şeklinde görevlendirilir. Bu modeller, en çok metin oluşturmayı içeren görevler için uygundur. Bu model ailelerinin temsilcileri şunları kapsar: - [CTRL](https://huggingface.co/transformers/model_doc/ctrl.html) - [GPT](https://huggingface.co/docs/transformers/model_doc/openai-gpt) - [GPT-2](https://huggingface.co/transformers/model_doc/gpt2.html) - [Transformer XL](https://huggingface.co/transformers/model_doc/transformerxl.html)
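Örneğin, aşağıdaki küçük taslak (yalnızca fikir vermek amaçlıdır; kullanılan model adı ve örnek cümle temsilîdir) bir decoder modeliyle metin üretmenin nasıl görünebileceğini gösterir:

```py
from transformers import pipeline

# GPT-2 bir decoder modelidir; pipeline ile metin üretimi yapabiliriz
generator = pipeline("text-generation", model="gpt2")
print(generator("In this course, we will teach you how to", max_length=30))
```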
course/chapters/tr/chapter1/6.mdx/0
{ "file_path": "course/chapters/tr/chapter1/6.mdx", "repo_id": "course", "token_count": 371 }
138
<FrameworkSwitchCourse {fw} /> # Đằng sau pipeline {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter2/section2_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter2/section2_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter2/section2_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter2/section2_tf.ipynb"}, ]} /> {/if} <Tip> Đây là phần đầu tiên có nội dung hơi khác một chút tùy thuộc vào việc bạn sử dụng PyTorch hay TensorFlow. Chuyển đổi công tắc trên đầu tiêu đề để chọn nền tảng bạn thích! </Tip> {#if fw === 'pt'} <Youtube id="1pedAIvTWXk"/> {:else} <Youtube id="wVN12smEvqg"/> {/if} Hãy bắt đầu với một ví dụ hoàn chỉnh, cùng xem những gì xảy ra phía sau khi chúng tôi thực thi đoạn mã sau trong [Chương 1](/course/chapter1): ```python from transformers import pipeline classifier = pipeline("sentiment-analysis") classifier( [ "I've been waiting for a HuggingFace course my whole life.", "I hate this so much!", ] ) ``` và thu được: ```python out [{'label': 'POSITIVE', 'score': 0.9598047137260437}, {'label': 'NEGATIVE', 'score': 0.9994558095932007}] ``` Như chúng ta đã thấy trong [Chương 1](/course/chapter1), pipeline này nhóm ba bước lại với nhau: tiền xử lý, đưa các đầu vào qua mô hình và hậu xử lý: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/full_nlp_pipeline.svg" alt="The full NLP pipeline: tokenization of text, conversion to IDs, and inference through the Transformer model and the model head."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/full_nlp_pipeline-dark.svg" alt="The full NLP pipeline: tokenization of text, conversion to IDs, and inference through the Transformer model and the model head."/> </div> Hãy cùng đi qua từng phần này. ## Tiền xử lý với một tokenizer Giống như các mạng nơ-ron khác, các mô hình Transformers không thể xử lý trực tiếp văn bản thô, vì vậy bước đầu tiên trong quy trình của chúng ta là chuyển các đầu vào văn bản thành dạng số mà mô hình có thể hiểu được. Để làm điều này, chúng ta sử dụng *tokenizer*, hàm sẽ chịu trách nhiệm về: - Tách đầu vào thành các từ, từ phụ, hoặc ký hiệu (như dấu chấm câu) được gọi là *tokens* - Ánh xạ mỗi token thành một số nguyên - Thêm đầu vào bổ sung có thể hữu ích cho mô hình Tất cả quá trình tiền xử lý này cần được thực hiện giống hệt như khi mô hình được huấn luyện trước, vì vậy trước tiên chúng ta cần tải xuống thông tin đó từ [Model Hub](https://huggingface.co/models). Để làm điều này, chúng tôi sử dụng lớp `AutoTokenizer` và phương thức `from_pretrained()` của nó. Sử dụng tên checkpoint mô hình của chúng ta, nó sẽ tự động tìm nạp dữ liệu được liên kết với tokenizer của mô hình và lưu vào bộ nhớ cache (vì vậy nó chỉ được tải xuống lần đầu tiên bạn chạy mã bên dưới). 
Vì checkpoint mặc định của `sentiment-analysis` là `distilbert-base-unsased-finetuned-sst-2-english` (bạn có thể xem thẻ mô hình của nó [tại đây](https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english)), chúng ta chạy như sau: ```python from transformers import AutoTokenizer checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` Khi có tokenizer rồi, chúng ta có thể truyền trực tiếp các câu của mình vào bên trong và nhận lại một từ điển đã sẵn sàng để cung cấp cho mô hình! Việc duy nhất cần làm là chuyển đổi danh sách các ID đầu vào thành các tensor. Bạn có thể sử dụng 🤗 Transformers mà không phải lo lắng về khung ML nào được sử dụng phía dưới; nó có thể là PyTorch hoặc TensorFlow hoặc Flax đối với một số mô hình. Tuy nhiên, các mô hình Transformer chỉ chấp nhận *tensor* làm đầu vào. Nếu đây là lần đầu tiên bạn nghe về tensor, bạn có thể nghĩ chúng như là mảng NumPy. Mảng NumPy có thể là giá trị vô hướng (0D), vectơ (1D), ma trận (2D) hoặc có nhiều kích thước hơn. Nó thực sự là một tensor; Các tensor của các khung ML khác hoạt động tương tự và thường khởi tạo đơn giản như các mảng NumPy. Để chỉ định loại tensors mà chúng ta muốn trả về (PyTorch, TensorFlow hoặc thuần NumPy), ta sử dụng tham số `return_tensors`: {#if fw === 'pt'} ```python raw_inputs = [ "I've been waiting for a HuggingFace course my whole life.", "I hate this so much!", ] inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="pt") print(inputs) ``` {:else} ```python raw_inputs = [ "I've been waiting for a HuggingFace course my whole life.", "I hate this so much!", ] inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="tf") print(inputs) ``` {/if} Đừng lo lắng về padding (đệm) và truncation (cắt bớt) vội; chúng tôi sẽ giải thích những điều đó sau. Những điều chính cần nhớ ở đây là bạn có thể chuyển một câu hoặc một danh sách các câu, cũng như chỉ định loại tensors bạn muốn lấy lại (nếu không có loại nào được truyền vào, mặc định bạn sẽ nhận được kết quả trả về là một danh sách). {#if fw === 'pt'} Đây là kết quả tương ứng tensor PyTorch: ```python out { 'input_ids': tensor([ [ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102], [ 101, 1045, 5223, 2023, 2061, 2172, 999, 102, 0, 0, 0, 0, 0, 0, 0, 0] ]), 'attention_mask': tensor([ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ]) } ``` {:else} Đây là kết quả tương ứng tensor Tensorflow: ```python out { 'input_ids': <tf.Tensor: shape=(2, 16), dtype=int32, numpy= array([ [ 101, 1045, 1005, 2310, 2042, 3403, 2005, 1037, 17662, 12172, 2607, 2026, 2878, 2166, 1012, 102], [ 101, 1045, 5223, 2023, 2061, 2172, 999, 102, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=int32)>, 'attention_mask': <tf.Tensor: shape=(2, 16), dtype=int32, numpy= array([ [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0] ], dtype=int32)> } ``` {/if} Bản thân kết quả đầu ra là một từ điển có chứa hai khóa, `input_ids` và `attention_mask`. `input_ids` chứa hai hàng số nguyên (một cho mỗi câu) là số nhận dạng duy nhất của token trong mỗi câu. Chúng tôi sẽ giải thích `attention_mask` là gì ở phần sau của chương này. ## Đi qua mô hình {#if fw === 'pt'} Chúng ta có thể tải xuống mô hình được huấn luyện trước của mình giống như cách đã làm với tokenizer. 
🤗 Transformers cung cấp một lớp `AutoModel` cũng có phương thức `from_pretrained()`: ```python from transformers import AutoModel checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" model = AutoModel.from_pretrained(checkpoint) ``` {:else} Chúng ta có thể tải xuống mô hình được huấn luyện trước của mình giống như cách đã làm với tokenizer. 🤗 Transformers cung cấp một lớp `TFAutoModel` cũng có phương thức `from_pretrained()`: ```python from transformers import TFAutoModel checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" model = TFAutoModel.from_pretrained(checkpoint) ``` {/if} Trong đoạn mã này, chúng ta đã tải xuống cùng một checkpoint đã sử dụng trong pipeline của mình trước đây (nó được lưu vào bộ nhớ đệm rồi) và khởi tạo một mô hình với nó. Kiến trúc này chỉ chứa mô-đun Transformer cơ sở: với một số đầu vào, nó xuất ra cái mà chúng ta sẽ gọi là *hidden states* (*trạng thái ẩn*), còn được gọi là *đặc trưng*. Đối với mỗi đầu vào mô hình, chúng ta sẽ truy xuất một vectơ đa chiều đại diện cho **sự hiểu theo ngữ cảnh của đầu vào đó bằng mô hình Transformer**. Nếu điều này không hợp lý, đừng lo lắng về nó. Chúng tôi sẽ giải thích tất cả sau. Mặc dù những trạng thái ẩn này có thể tự hữu ích, nhưng chúng thường là đầu vào cho một phần khác của mô hình, được gọi là *head* (*đầu*). Trong [Chapter 1](/course/chapter1), các tác vụ khác nhau có thể được thực hiện với cùng một kiến trúc, nhưng mỗi tác vụ này sẽ có một phần đầu khác nhau được liên kết với nó. ### Một vectơ đa chiều Đầu ra vectơ của mô-đun Transformer thường lớn với ba chiều: - **Kích thước batch (lô)**: Số chuỗi được xử lý tại một thời điểm (trong ví dụ của chúng tôi là 2). - **Độ dài chuỗi**: Độ dài biểu diễn số của chuỗi (trong ví dụ của chúng tôi là 16). - **Kích thước ẩn**: Kích thước vectơ của mỗi đầu vào mô hình. Nó được cho là "có số chiều cao" vì giá trị cuối cùng. Kích thước ẩn có thể rất lớn (768 là giá trị phổ biến cho các mô hình nhỏ hơn và trong các mô hình lớn hơn, con số này có thể đạt tới 3072 hoặc hơn). Có thể thấy điều này nếu chúng ta cung cấp các đầu vào đã xử lý trước cho mô hình của mình: {#if fw === 'pt'} ```python outputs = model(**inputs) print(outputs.last_hidden_state.shape) ``` ```python out torch.Size([2, 16, 768]) ``` {:else} ```py outputs = model(inputs) print(outputs.last_hidden_state.shape) ``` ```python out (2, 16, 768) ``` {/if} Lưu ý rằng đầu ra của các mô hình 🤗 Transformers hoạt động giống như các `namedtuple` hoặc từ điển. Bạn có thể truy cập các phần tử theo thuộc tính (như chúng ta đã làm) hoặc theo khóa (`outputs["last_hidden_state"]`), hoặc thậm chí theo chỉ mục nếu bạn biết chính xác nơi bạn đang tìm kiếm (`outputs[0]`). ### Đầu mô hình: Hợp lý tời từng con số Các đầu mô hình lấy vector đa chiều của các trạng thái ẩn làm đầu vào và chiếu chúng lên một chiều khác. Chúng thường bao gồm một hoặc một vài lớp tuyến tính: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/transformer_and_head.svg" alt="A Transformer network alongside its head."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/transformer_and_head-dark.svg" alt="A Transformer network alongside its head."/> </div> Đầu ra của mô hình Transformer được gửi trực tiếp đến đầu mô hình để được xử lý. Trong biểu đồ này, mô hình được biểu diễn bằng lớp nhúng của nó và các lớp tiếp theo. 
Lớp nhúng chuyển đổi mỗi ID trong đầu vào được mã hóa thành một vectơ đại diện cho token được liên kết. Các lớp tiếp theo thao tác các vectơ đó bằng cách sử dụng cơ chế chú ý để tạo ra biểu diễn cuối cùng của các câu. Có nhiều kiến trúc khác nhau có sẵn trong 🤗 Transformers, với mỗi kiến trúc được thiết kế xoay quanh một tác vụ cụ thể. Đây là danh sách không đầy đủ: - `*Model` (truy xuất các trạng thái ẩn) - `*ForCausalLM` - `*ForMaskedLM` - `*ForMultipleChoice` - `*ForQuestionAnswering` - `*ForSequenceClassification` - `*ForTokenClassification` - and others 🤗 {#if fw === 'pt'} Với ví dụ của mình, chúng ta sẽ cần một mô hình có đầu phân loại tuần tự (để có thể phân loại các câu là khẳng định hoặc phủ định). Vì vậy, ta sẽ không sử dụng lớp `AutoModel` mà là `AutoModelForSequenceClassification`: ```python from transformers import AutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" model = AutoModelForSequenceClassification.from_pretrained(checkpoint) outputs = model(**inputs) ``` {:else} Với ví dụ của mình, chúng ta sẽ cần một mô hình có đầu phân loại tuần tự (để có thể phân loại các câu là khẳng định hoặc phủ định). Vì vậy, ta sẽ không sử dụng lớp `TFAutoModel` mà là `TFAutoModelForSequenceClassification`: ```python from transformers import TFAutoModelForSequenceClassification checkpoint = "distilbert-base-uncased-finetuned-sst-2-english" model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) outputs = model(inputs) ``` {/if} Giờ thì nếu chúng ta nhìn vào hình dạng các đầu vào của mình, kích thước sẽ thấp hơn nhiều: đầu mô hình lấy các vectơ đa chiều mà chúng ta đã thấy trước đây và xuất ra các vectơ có chứa hai giá trị (mỗi giá trị tương ứng một nhãn): ```python print(outputs.logits.shape) ``` {#if fw === 'pt'} ```python out torch.Size([2, 2]) ``` {:else} ```python out (2, 2) ``` {/if} Vì chúng ta chỉ có hai câu và hai nhãn, kết quả nhận được từ mô hình của chúng ta là dạng 2 x 2. ## Hậu xử lý đầu ra Các giá trị chúng ta nhận được dưới dạng đầu ra từ mô hình không nhất thiết phải tự có nghĩa. Hãy cùng xem: ```python print(outputs.logits) ``` {#if fw === 'pt'} ```python out tensor([[-1.5607, 1.6123], [ 4.1692, -3.3464]], grad_fn=<AddmmBackward>) ``` {:else} ```python out <tf.Tensor: shape=(2, 2), dtype=float32, numpy= array([[-1.5606991, 1.6122842], [ 4.169231 , -3.3464472]], dtype=float32)> ``` {/if} Mô hình đã dự đoán `[-1.5607, 1.6123]` cho câu đầu tiên và `[4.1692, -3.3464]` cho câu thứ hai. Đó không phải là xác suất mà là *logits*, điểm số thô, chưa chuẩn hóa được xuất ra bởi lớp cuối cùng của mô hình. 
Để được chuyển đổi thành xác suất, chúng cần phải trải qua lớp [SoftMax](https://en.wikipedia.org/wiki/Softmax_function) (tất cả các mô hình 🤗 Transformers đều xuất ra logits, vì hàm mất mát cho việc huấn luyện thường sẽ kết hợp hàm kích hoạt cuối cùng, chẳng hạn như SoftMax, với hàm mất mát thực tế, chẳng hạn như entropy chéo): {#if fw === 'pt'} ```py import torch predictions = torch.nn.functional.softmax(outputs.logits, dim=-1) print(predictions) ``` {:else} ```py import tensorflow as tf predictions = tf.math.softmax(outputs.logits, axis=-1) print(predictions) ``` {/if} {#if fw === 'pt'} ```python out tensor([[4.0195e-02, 9.5980e-01], [9.9946e-01, 5.4418e-04]], grad_fn=<SoftmaxBackward>) ``` {:else} ```python out tf.Tensor( [[4.01951671e-02 9.59804833e-01] [9.9945587e-01 5.4418424e-04]], shape=(2, 2), dtype=float32) ``` {/if} Bây giờ chúng ta có thể thấy rằng mô hình đã dự đoán `[0.0402, 0.9598]` cho câu đầu tiên và `[0.9995, 0.0005]` cho câu thứ hai. Đây là những điểm xác suất dễ nhận biết. Để lấy các nhãn tương ứng với từng vị trí, chúng ta có thể kiểm tra thuộc tính `id2label` của cấu hình mô hình (tìm hiểu thêm về điều này trong phần tiếp theo): ```python model.config.id2label ``` ```python out {0: 'NEGATIVE', 1: 'POSITIVE'} ``` Bây giờ chúng ta có thể kết luận rằng mô hình đã dự đoán như sau: - Câu đầu tiên: TIÊU CỰC: 0,0402, TÍCH CỰC: 0,9598 - Câu thứ hai: TIÊU CỰC: 0,9995, TÍCH CỰC: 0,0005 Chúng tôi đã tái tạo thành công ba bước của quy trình: tiền xử lý bằng tokenizers, đưa đầu vào qua mô hình và hậu xử lý! Giờ thì chúng ta hãy dành một chút thời gian để đi sâu hơn vào từng bước đó. <Tip> ✏️ **Thử nghiệm thôi!** Chọn hai (hoặc nhiều) văn bản của riêng bạn và chạy chúng thông qua `sentiment-analysis`. Sau đó, tự mình lặp lại các bước bạn đã thấy ở đây và kiểm tra xem bạn có thu được kết quả tương tự không! </Tip>
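Để tổng kết phần này, dưới đây là một bản phác thảo tối giản ghép cả ba bước (tokenizer, mô hình, hậu xử lý) lại với nhau. Lưu ý rằng đây không phải là cách hàm `pipeline()` được triển khai trong thư viện, mà chỉ là một ví dụ minh hoạ dựa trên chính các đoạn mã ở trên:

{#if fw === 'pt'}
```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSequenceClassification.from_pretrained(checkpoint)

raw_inputs = [
    "I've been waiting for a HuggingFace course my whole life.",
    "I hate this so much!",
]

# Bước 1: tiền xử lý với tokenizer
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="pt")

# Bước 2: đưa đầu vào qua mô hình
outputs = model(**inputs)

# Bước 3: hậu xử lý - chuyển logits thành xác suất rồi ánh xạ sang nhãn
predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)
for probs in predictions:
    label_id = int(probs.argmax())
    print(model.config.id2label[label_id], round(float(probs[label_id]), 4))
```
{:else}
```python
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSequenceClassification

checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)

raw_inputs = [
    "I've been waiting for a HuggingFace course my whole life.",
    "I hate this so much!",
]

# Bước 1: tiền xử lý với tokenizer
inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="tf")

# Bước 2: đưa đầu vào qua mô hình
outputs = model(inputs)

# Bước 3: hậu xử lý - chuyển logits thành xác suất rồi ánh xạ sang nhãn
predictions = tf.math.softmax(outputs.logits, axis=-1).numpy()
for probs in predictions:
    label_id = int(probs.argmax())
    print(model.config.id2label[label_id], round(float(probs[label_id]), 4))
```
{/if}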
course/chapters/vi/chapter2/2.mdx/0
{ "file_path": "course/chapters/vi/chapter2/2.mdx", "repo_id": "course", "token_count": 10232 }
139
<FrameworkSwitchCourse {fw} /> # Chia sẻ các mô hình huấn luyện trước {#if fw === 'pt'} <CourseFloatingBanner chapter={4} classNames="absolute z-10 right-0 top-0" notebooks={[ { label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter4/section3_pt.ipynb", }, { label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter4/section3_pt.ipynb", }, ]} /> {:else} <CourseFloatingBanner chapter={4} classNames="absolute z-10 right-0 top-0" notebooks={[ { label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter4/section3_tf.ipynb", }, { label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter4/section3_tf.ipynb", }, ]} /> {/if} Trong các bước bên dưới, chúng ta sẽ xem xét các cách dễ nhất để chia sẻ các mô hình được huấn luyện trước với 🤗 Hub. Ta có sẵn các công cụ và tiện ích giúp việc chia sẻ và cập nhật mô hình trực tiếp trên Hub trở nên đơn giản, và chúng ta sẽ cùng nhau khám phá bên dưới. <Youtube id="9yY3RB_GSPM" /> Chúng tôi khuyến khích tất cả người dùng huấn luyện mô hình đóng góp bằng cách chia sẻ chúng với cộng đồng - chia sẻ mô hình, ngay cả khi được huấn luyện trên các bộ dữ liệu rất cụ thể, sẽ giúp ích cho những người khác, giúp họ tiết kiệm thời gian và tính toán tài nguyên và cung cấp quyền truy cập vào các hiện vật hữu ích được huấn luyện. Đổi lại, bạn có thể hưởng lợi từ công việc mà những người khác đã làm! Có ba cách để tạo kho lưu trữ mô hình mới: - Sử dụng API `push_to_hub` - Sử dụng thư viện Python `huggingface_hub` - Sử dụng giao diện web Khi bạn đã tạo một kho lưu trữ, bạn có thể tải tệp lên đó qua git và git-lfs. Chúng tôi sẽ hướng dẫn bạn cách tạo kho lưu trữ mô hình và tải tệp lên chúng trong các phần sau. ## Sử dụng API `push_to_hub` {#if fw === 'pt'} <Youtube id="Zh0FfmVrKX0" /> {:else} <Youtube id="pUh5cGmNV8Y" /> {/if} Cách đơn giản nhất để tải tệp lên Hub là tận dụng API `push_to_hub`. Trước khi đi xa hơn, bạn sẽ cần tạo token xác thực để API `huggingface_hub` biết bạn là ai và bạn có quyền ghi vào không gian tên nào. Đảm bảo rằng bạn đang ở trong môi trường mà bạn đã cài đặt `transformers` (xem [Thiết lập](/course/chapter0)). Nếu bạn đang ở trong notebook, bạn có thể sử dụng chức năng sau để đăng nhập: ```python from huggingface_hub import notebook_login notebook_login() ``` Trên terminal, bạn có thể: ```bash huggingface-cli login ``` Trong cả hai trường hợp, bạn sẽ được nhắc nhập tên người dùng và mật khẩu của mình, đó là những mật khẩu mà bạn sử dụng để đăng nhập vào Hub. Nếu bạn chưa có hồ sơ Hub, bạn nên tạo một hồ sơ [tại đây](https://huggingface.co/join). Tuyệt vời! Bây giờ bạn có token xác thực được lưu trữ trong thư mục bộ nhớ cache của mình. Hãy tạo một số kho lưu trữ thôi! {#if fw === 'pt'} Nếu bạn đã thử với API `Trainer` để huấn luyện một mô hình, thì cách dễ nhất để tải nó lên Hub là đặt `push_to_hub=True` khi bạn định nghĩa `TrainingArguments`: ```py from transformers import TrainingArguments training_args = TrainingArguments( "bert-finetuned-mrpc", save_strategy="epoch", push_to_hub=True ) ``` Khi bạn gọi `trainer.train()`, `Trainer` sau đó sẽ tải mô hình của bạn lên Hub mỗi khi nó được lưu (ở đây là mỗi epoch) trong một kho lưu trữ trong không gian tên của bạn. 
Kho lưu trữ đó sẽ được đặt tên giống như thư mục đầu ra bạn đã chọn (ở đây là `bert-finetuned-mrpc`) nhưng bạn có thể chọn một tên khác với `hub_model_id = "a_different_name"`. Để tải mô hình của bạn lên tổ chức mà bạn là thành viên, chỉ cần truyền nó vào qua `hub_model_id = "my_organization/my_repo_name"`. Sau khi quá trình huấn luyện của bạn kết thúc, bạn nên thực hiện một `trainer.push_to_hub()` cuối cùng để tải lên phiên bản cuối cùng của mô hình của bạn. Nó cũng sẽ tạo ra một thẻ mô hình với tất cả các siêu dữ liệu liên quan, báo cáo các siêu tham số được sử dụng và kết quả đánh giá! Dưới đây là một ví dụ về nội dung bạn có thể tìm thấy trong một thẻ mô hình như vậy: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/model_card.png" alt="An example of an auto-generated model card." width="100%" /> </div> {:else} Nếu bạn sử dụng Keras để huấn luyện mô hình, cách dễ nhất để tải nó lên Hub là sử dụng `PushToHubCallback` khi bạn gọi `model.fit()`: ```py from transformers import PushToHubCallback callback = PushToHubCallback( "bert-finetuned-mrpc", save_strategy="epoch", tokenizer=tokenizer ) ``` Sau đó, bạn nên thêm `callbacks=[callback]` trong lệnh gọi của mình tới `model.fit()`. Sau đó, lệnh này sẽ tải mô hình của bạn lên Hub mỗi khi nó được lưu (ở đây là mỗi epoch) trong một kho lưu trữ trên không gian tên của bạn. Kho lưu trữ đó sẽ được đặt tên giống như thư mục đầu ra bạn đã chọn (ở đây là `bert-finetuned-mrpc`) nhưng bạn có thể chọn một tên khác với `hub_model_id = "a_different_name"`. Để tải mô hình của bạn lên tổ chức mà bạn là thành viên, chỉ cần truyền nó vào qua `hub_model_id = "my_organization/my_repo_name"`. {/if} Ở cấp độ thấp hơn, việc truy cập Model Hub có thể được thực hiện trực tiếp trên các mô hình, tokenizer và các đối tượng cấu hình thông qua phương thức `push_to_hub()`. Phương pháp này xử lý cả việc tạo kho lưu trữ và đẩy các tệp mô hình và tệp tokenizer trực tiếp đến kho lưu trữ. Không cần xử lý thủ công, không giống như với API mà chúng ta sẽ thấy bên dưới. Để có ý tưởng về cách nó hoạt động, trước tiên chúng ta hãy khởi tạo một mô hình và một tokenizer: {#if fw === 'pt'} ```py from transformers import AutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` {:else} ```py from transformers import TFAutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = TFAutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) ``` {/if} Bạn có thể tự do làm bất cứ điều gì bạn muốn với những thứ này - thêm token vào trình tokenize, huấn luyện mô hình, tinh chỉnh nó. Khi bạn hài lòng với kết quả mô hình, trọng số, và tokenizer, bạn có thể tận dụng phương thức `push_to_hub()` có sẵn trực tiếp trên đối tượng `model`: ```py model.push_to_hub("dummy-model") ``` Điều này sẽ tạo kho lưu trữ mới `dummy-model` trong hồ sơ của bạn và điền nó vào các tệp mô hình của bạn. 
Làm tương tự với tokenizer để tất cả các tệp có sẵn trong kho lưu trữ này: ```py tokenizer.push_to_hub("dummy-model") ``` Nếu bạn thuộc một tổ chức, chỉ cần chỉ định tham số `organization` để tải lên không gian tên của tổ chức đó: ```py tokenizer.push_to_hub("dummy-model", organization="huggingface") ``` Nếu bạn muốn sử dụng một token Hugging Face cụ thể, bạn cũng có thể chỉ định nó thông qua `push_to_hub()`: ```py tokenizer.push_to_hub("dummy-model", organization="huggingface", use_auth_token="<TOKEN>") ``` Giờ hãy đi tới Model Hub để tìm mô hình mới được tải lên của bạn: *https://huggingface.co/user-or-organization/dummy-model*. Nhấp vào tab "Files and versions" ("Tệp và phiên bản") và bạn sẽ thấy các tệp hiển thị trong ảnh chụp màn hình sau: {#if fw === 'pt'} <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/push_to_hub_dummy_model.png" alt="Dummy model containing both the tokenizer and model files." width="80%"/> </div> {:else} <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/push_to_hub_dummy_model_tf.png" alt="Dummy model containing both the tokenizer and model files." width="80%"/> </div> {/if} <Tip> ✏️ **Thử nghiệm thôi!** Lấy mô hình và trình tokenize được liên kết với checkpoint `bert-base-cased` và tải chúng lên kho lưu trữ trong không gian tên của bạn bằng phương thức `push_to_hub()`. Kiểm tra kỹ xem repo có xuất hiện chính xác trên trang của bạn hay không trước khi xóa nó. </Tip> Như bạn đã thấy, phương thức `push_to_hub()` nhận một vài tham số, giúp bạn có thể tải lên không gian tên tổ chức hoặc kho lưu trữ cụ thể hoặc sử dụng token API khác. Chúng tôi khuyên bạn nên xem thông số kỹ thuật phương pháp có sẵn trực tiếp trong [🤗 tài liệu về Transformers](https://huggingface.co/transformers/model_sharing.html) để biết những gì ta có thể làm. Phương thức `push_to_hub()` được hỗ trợ bởi gói Python [`huggingface_hub`](https://github.com/huggingface/huggingface_hub), cung cấp một API trực tiếp đến Hugging Face Hub. Nó được tích hợp trong 🤗 Transformers và một số thư viện học máy khác, như [`allenlp`](https://github.com/allenai/allennlp). Mặc dù chúng tôi tập trung vào tích hợp 🤗 Transformers trong chương này, việc tích hợp nó vào mã hoặc thư viện của riêng bạn rất đơn giản. Chuyển đến phần cuối cùng để xem cách tải tệp lên kho lưu trữ mới tạo của bạn! ## Sử dụng thư viện Python `huggingface_hub` Thư viện Python `huggingface_hub` là một gói cung cấp một bộ công cụ cho các hub mô hình và tập dữ liệu. Nó cung cấp các phương thức và lớp đơn giản cho các tác vụ phổ biến như tiếp nhận thông tin về kho lưu trữ trên hub và quản lý chúng. Nó cung cấp các API đơn giản hoạt động trên git để quản lý nội dung của các kho đó và tích hợp Hub trong các dự án và thư viện của bạn. Tương tự như việc sử dụng API `push_to_hub`, điều này sẽ yêu cầu bạn lưu token API vào bộ nhớ cache của mình. Để thực hiện việc này, bạn sẽ cần sử dụng lệnh `login` từ CLI, như đã đề cập trong phần trước (một lần nữa, hãy đảm bảo thêm các lệnh này với ký tự `!` nếu chạy trên Google Colab): ```bash huggingface-cli login ``` Gói `huggingface_hub` cung cấp một số phương thức và lớp hữu ích cho mục đích của chúng ta. 
Thứ nhất, có một số phương pháp để quản lý việc tạo, xóa kho lưu trữ và các phương pháp khác: ```python no-format from huggingface_hub import ( # Quản lý người dùng login, logout, whoami, # Tạo và quản lý kho dữ liệu create_repo, delete_repo, update_repo_visibility, # Và một số phương thức truy xuất/thay đổi thông tin về mặt nội dung list_models, list_datasets, list_metrics, list_repo_files, upload_file, delete_file, ) ``` Ngoài ra, nó cung cấp lớp `Repository` rất mạnh mẽ để quản lý một kho lưu trữ cục bộ. Chúng ta sẽ khám phá các phương thức này và lớp đó trong phần tiếp theo để hiểu cách tận dụng chúng. Phương thức `create_repo` có thể được sử dụng để tạo một kho lưu trữ mới trên hub: ```py from huggingface_hub import create_repo create_repo("dummy-model") ``` Thao tác này sẽ tạo kho lưu trữ `dummy-model` trong không gian tên của bạn. Nếu muốn, bạn có thể chỉ định tổ chức nào mà kho lưu trữ sẽ thuộc về bằng cách sử dụng tham số `organization`: ```py from huggingface_hub import create_repo create_repo("dummy-model", organization="huggingface") ``` Thao tác này sẽ tạo kho lưu trữ `dummy-model` trong không gian tên `huggingface`, giả sử bạn thuộc tổ chức đó. Các tham số có thể hữu ích khác là: - `private`, để chỉ định xem liệu kho lưu trữ có nên hiển thị với những người khác hay không. - `token`, nếu bạn muốn ghi đè token được lưu trữ trong bộ nhớ cache của mình bằng một token nhất định. - `repo_type`, nếu bạn muốn tạo `dataset` hoặc `space` thay vì một mô hình. Các giá trị được chấp nhận là `"dataset"` và `"space"`. Khi kho lưu trữ được tạo, chúng ta nên thêm tệp vào đó! Chuyển sang phần tiếp theo để xem ba cách có thể xử lý vấn đề này. ## Sử dụng giao diện web Giao diện web cung cấp các công cụ để quản lý kho lưu trữ trực tiếp trong Hub. Sử dụng giao diện này, bạn có thể dễ dàng tạo kho lưu trữ, thêm tệp (thậm chí cả tệp lớn!), Khám phá các mô hình, trực quan hóa các điểm khác biệt và hơn thế nữa. Để tạo một kho lưu trữ mới, hãy truy cập [huggingface.co/new](https://huggingface.co/new): <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/new_model.png" alt="Page showcasing the model used for the creation of a new model repository." width="80%" /> </div> Đầu tiên, chỉ định chủ sở hữu của kho lưu trữ: đây có thể là bạn hoặc bất kỳ tổ chức nào mà bạn liên kết. Nếu bạn chọn một tổ chức, mô hình sẽ được giới thiệu trên trang của tổ chức và mọi thành viên của tổ chức sẽ có khả năng đóng góp vào kho lưu trữ. Tiếp theo, nhập tên mô hình của bạn. Đây cũng sẽ là tên của kho lưu trữ. Cuối cùng, bạn có thể chỉ định xem bạn muốn mô hình của mình là công khai hay riêng tư. Các mô hình tư nhân được ẩn khỏi chế độ xem công khai. Sau khi tạo kho mô hình, bạn sẽ thấy một trang như sau: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/empty_model.png" alt="An empty model page after creating a new repository." width="80%" /> </div> Đây là nơi mô hình của bạn sẽ được lưu trữ. Để bắt đầu điền nó, bạn có thể thêm tệp README trực tiếp từ giao diện web. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/dummy_model.png" alt="The README file showing the Markdown capabilities." width="80%" /> </div> Tệp README nằm trong Markdown - hãy thoải mái sử dụng nó! Phần thứ ba của chương này dành riêng cho việc xây dựng một thẻ mô hình. 
Đây là những điều quan trọng hàng đầu trong việc mang lại giá trị cho mô hình của bạn, vì chúng là nơi bạn nói cho người khác biết nó có thể làm gì. Nếu bạn nhìn vào tab "Files and versions" hay "Tệp và phiên bản", bạn sẽ thấy rằng chưa có nhiều tệp ở đó - chỉ có _README.md_ bạn vừa tạo và tệp _.gitattributes_ theo dõi các tệp lớn. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/files.png" alt="The 'Files and versions' tab only shows the .gitattributes and README.md files." width="80%" /> </div> Tiếp theo, chúng ta sẽ xem xét cách thêm một số tệp mới. ## Tải các tệp mô hình Hệ thống quản lý tệp trên Hugging Face Hub dựa trên git cho các tệp thông thường và git-lfs (viết tắt của [Git Large File Storage](https://git-lfs.github.com/)) cho các tệp lớn hơn . Trong phần tiếp theo, chúng ta sẽ xem xét ba cách khác nhau để tải tệp lên Hub: thông qua `huggingface_hub` và thông qua lệnh git. ### Phương pháp `upload_file` Sử dụng `upload_file` không yêu cầu cài đặt git và git-lfs trên hệ thống của bạn. Nó đẩy các tệp trực tiếp đến 🤗 Hub bằng cách sử dụng các yêu cầu HTTP POST. Một hạn chế của phương pháp này là nó không xử lý các tệp có kích thước lớn hơn 5GB. Nếu tệp của bạn lớn hơn 5GB, vui lòng làm theo hai phương pháp khác được nêu chi tiết bên dưới. API có thể được sử dụng như sau: ```py from huggingface_hub import upload_file upload_file( "<path_to_file>/config.json", path_in_repo="config.json", repo_id="<namespace>/dummy-model", ) ``` Thao tác này sẽ tải tệp `config.json` có sẵn tại `<path_to_file>` vào thư mục gốc của kho lưu trữ là `config.json`, vào kho lưu trữ `dummy-model`. Các tham số có thể hữu ích khác là: - `token`, nếu bạn muốn ghi đè token được lưu trữ trong bộ nhớ cache của mình bằng một token nhất định. - `repo_type`, nếu bạn muốn tải lên `dataset` hoặc `space` thay vì một mô hình. Các giá trị được chấp nhận là `"dataset"` và `"space"`. ### Lớp `Repository` Lớp `Repository` quản lý một kho lưu trữ cục bộ theo cách giống như git. Nó tóm tắt hầu hết các điểm khó khăn mà người ta có thể có với git để cung cấp tất cả các tính năng mà chúng tôi yêu cầu. Sử dụng lớp này yêu cầu phải cài đặt git và git-lfs, vì vậy hãy đảm bảo rằng bạn đã cài đặt git-lfs (xem [tại đây](https://git-lfs.github.com/) để biết hướng dẫn cài đặt) và thiết lập trước khi bắt đầu. Để bắt đầu chơi với kho lưu trữ chúng ta vừa tạo, chúng ta có thể bắt đầu bằng cách khởi tạo nó vào một thư mục cục bộ bằng cách sao chép kho lưu trữ từ xa: ```py from huggingface_hub import Repository repo = Repository("<path_to_dummy_folder>", clone_from="<namespace>/dummy-model") ``` Thao tác này đã tạo thư mục `<path_to_dummy_folder>` trong thư mục làm việc của chúng ta. Thư mục này chỉ chứa tệp `.gitattributes` vì đó là tệp duy nhất được tạo khi khởi tạo kho lưu trữ thông qua `create_repo`. Từ thời điểm này, chúng ta có thể tận dụng một số phương pháp git truyền thống: ```py repo.git_pull() repo.git_add() repo.git_commit() repo.git_push() repo.git_tag() ``` Và những cái khác! Chúng tôi khuyên bạn nên xem tài liệu về `Repository` hay `Kho lưu trữ` có sẵn [tại đây](https://github.com/huggingface/huggingface_hub/tree/main/src/huggingface_hub#advanced-programmatic-repository-management) để biết tổng quan về tất cả các phương pháp. Hiện tại, chúng ta có một mô hình và một tokenizer mà ta muốn đưa vào Hub. Chúng ta đã nhân bản thành công kho lưu trữ, do đó chúng tôi có thể lưu các tệp trong kho lưu trữ đó. 
Trước tiên, chúng tôi đảm bảo rằng bản sao cục bộ được cập nhật bằng cách kéo về những thay đổi mới nhất: ```py repo.git_pull() ``` Sau đó, ta lưu mô hình và tệp tokenizer: ```py model.save_pretrained("<path_to_dummy_folder>") tokenizer.save_pretrained("<path_to_dummy_folder>") ``` `<path_to_dummy_folder>` bây giờ chứa tất cả các tệp mô hình và tokenizer. Chúng ta thực hiện theo quy trình làm việc git thông thường bằng cách thêm tệp vào khu vực lưu trữ thay đổi, cam kết chúng và đẩy chúng vào hub: ```py repo.git_add() repo.git_commit("Thêm mô hình và tệp tokenizer") repo.git_push() ``` Xin chúc mừng! Bạn vừa đẩy các tệp đầu tiên của mình lên Hub. ### Phương pháp dựa trên git Đây là cách tiếp cận rất đơn giản để tải tệp lên: chúng ta sẽ làm trực tiếp với git và git-lfs. Hầu hết khó khăn đã được loại bỏ bởi các cách tiếp cận trước đây, nhưng có một số lưu ý với phương pháp tiếp theo, vì vậy chúng ta sẽ theo một trường hợp sử dụng phức tạp hơn. Sử dụng lớp này yêu cầu phải cài đặt git và git-lfs, vì vậy hãy đảm bảo bạn đã cài đặt [git-lfs](https://git-lfs.github.com/) (xem hướng dẫn cài đặt tại đây) và cài đặt trước khi bắt đầu . Trước tiên, hãy bắt đầu bằng cách khởi tạo git-lfs: ```bash git lfs install ``` ```bash Updated git hooks. Git LFS initialized. ``` Sau khi hoàn tất, bước đầu tiên là sao chép kho lưu trữ mô hình của bạn: ```bash git clone https://huggingface.co/<namespace>/<your-model-id> ``` Tên người dùng của tôi là `lysandre` và ta đã sử dụng tên mô hình là `dummy`, vì vậy lệnh kết thúc như sau: ``` git clone https://huggingface.co/lysandre/dummy ``` Bây giờ ta có một thư mục tên _dummy_ trong thư mục làm việc của mình. Ta có thể `cd` vào thư mục và xem nội dung: ```bash cd dummy && ls ``` ```bash README.md ``` Nếu bạn vừa tạo kho lưu trữ của mình bằng phương pháp `create_repo` của Hugging Face Hub, thì thư mục này chỉ nên chứa tệp `.gitattributes` ẩn. Nếu bạn đã làm theo hướng dẫn trong phần trước để tạo kho lưu trữ bằng giao diện web, thì thư mục phải chứa một tệp _README.md_ duy nhất cùng với tệp `.gitattributes` ẩn, như được hiển thị ở đây. Việc thêm một tệp có kích thước thông thường, chẳng hạn như tệp cấu hình, tệp từ vựng hoặc về cơ bản là bất kỳ tệp nào dưới vài megabyte, được thực hiện chính xác như cách người ta làm trong bất kỳ hệ thống dựa trên git nào. Tuy nhiên, các tệp lớn hơn phải được đăng ký thông qua git-lfs để đẩy chúng lên _huggingface.co_. Hãy quay lại Python một chút để tạo một mô hình và trình tokenize mà chúng ta muốn cam kết với kho lưu trữ dummy của chúng ta: {#if fw === 'pt'} ```py from transformers import AutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = AutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) # Làm bất cứ điều gì với mô hình, huấn luyện nó, tinh chỉnh nó ... model.save_pretrained("<path_to_dummy_folder>") tokenizer.save_pretrained("<path_to_dummy_folder>") ``` {:else} ```py from transformers import TFAutoModelForMaskedLM, AutoTokenizer checkpoint = "camembert-base" model = TFAutoModelForMaskedLM.from_pretrained(checkpoint) tokenizer = AutoTokenizer.from_pretrained(checkpoint) # Làm bất cứ điều gì với mô hình, huấn luyện nó, tinh chỉnh nó ... 
model.save_pretrained("<path_to_dummy_folder>") tokenizer.save_pretrained("<path_to_dummy_folder>") ``` {/if} Bây giờ chúng ta đã lưu một số tạo tác mô hình và tokenizer, hãy xem xét lại thư mục _dummy_: ```bash ls ``` {#if fw === 'pt'} ```bash config.json pytorch_model.bin README.md sentencepiece.bpe.model special_tokens_map.json tokenizer_config.json tokenizer.json ``` Nếu bạn nhìn vào kích thước tệp (ví dụ: với `ls -lh`), bạn sẽ thấy rằng tệp dict trạng thái mô hình (_pytorch_model.bin_) là ngoại lệ duy nhất, với hơn 400 MB. {:else} ```bash config.json README.md sentencepiece.bpe.model special_tokens_map.json tf_model.h5 tokenizer_config.json tokenizer.json ``` Nếu bạn nhìn vào kích thước tệp (ví dụ: với `ls -lh`), bạn sẽ thấy rằng tệp dict trạng thái mô hình (_t5_model.h5_) là ngoại lệ duy nhất, với hơn 400 MB. {/if} <Tip> ✏️ Khi tạo kho lưu trữ từ giao diện web, tệp *.gitattributes* được tự động thiết lập để xem xét các tệp có phần mở rộng nhất định, chẳng hạn như *.bin* và *.h5*, là tệp lớn và git-lfs sẽ theo dõi chúng mà không có thiết lập cần thiết về phía bạn. </Tip>{" "} Bây giờ chúng ta có thể tiếp tục và tiến hành như chúng ta thường làm với các kho lưu trữ Git truyền thống. Chúng ta có thể thêm tất cả các tệp vào môi trường dàn dựng của Git bằng lệnh `git add`: ```bash git add . ``` Sau đó, chúng ta có thể xem xét các tệp hiện đang được sắp xếp: ```bash git status ``` {#if fw === 'pt'} ```bash On branch main Your branch is up to date with 'origin/main'. Changes to be committed: (use "git restore --staged <file>..." to unstage) modified: .gitattributes new file: config.json new file: pytorch_model.bin new file: sentencepiece.bpe.model new file: special_tokens_map.json new file: tokenizer.json new file: tokenizer_config.json ``` {:else} ```bash On branch main Your branch is up to date with 'origin/main'. Changes to be committed: (use "git restore --staged <file>..." to unstage) modified: .gitattributes new file: config.json new file: sentencepiece.bpe.model new file: special_tokens_map.json new file: tf_model.h5 new file: tokenizer.json new file: tokenizer_config.json ``` {/if} Tương tự, chúng ta có thể đảm bảo rằng git-lfs đang theo dõi các tệp chính xác bằng cách sử dụng lệnh `status`: ```bash git lfs status ``` {#if fw === 'pt'} ```bash On branch main Objects to be pushed to origin/main: Objects to be committed: config.json (Git: bc20ff2) pytorch_model.bin (LFS: 35686c2) sentencepiece.bpe.model (LFS: 988bc5a) special_tokens_map.json (Git: cb23931) tokenizer.json (Git: 851ff3e) tokenizer_config.json (Git: f0f7783) Objects not staged for commit: ``` Chúng ta có thể thấy rằng tất cả các tệp đều có `Git` làm trình xử lý, ngoại trừ _pytorch_model.bin_ và _sentencepiece.bpe.model_, có` LFS`. Tuyệt vời! {:else} ```bash On branch main Objects to be pushed to origin/main: Objects to be committed: config.json (Git: bc20ff2) sentencepiece.bpe.model (LFS: 988bc5a) special_tokens_map.json (Git: cb23931) tf_model.h5 (LFS: 86fce29) tokenizer.json (Git: 851ff3e) tokenizer_config.json (Git: f0f7783) Objects not staged for commit: ``` Chúng ta có thể thấy rằng tất cả các tệp đều có `Git` làm trình xử lý, ngoại trừ _t5_model.h5_, có `LFS`. Tuyệt vời! 
{/if} Hãy tiến hành các bước cuối cùng, cam kết và đẩy đến kho lưu trữ từ xa _huggingface.co_: ```bash git commit -m "First model version" ``` {#if fw === 'pt'} ```bash [main b08aab1] First model version 7 files changed, 29027 insertions(+) 6 files changed, 36 insertions(+) create mode 100644 config.json create mode 100644 pytorch_model.bin create mode 100644 sentencepiece.bpe.model create mode 100644 special_tokens_map.json create mode 100644 tokenizer.json create mode 100644 tokenizer_config.json ``` {:else} ```bash [main b08aab1] First model version 6 files changed, 36 insertions(+) create mode 100644 config.json create mode 100644 sentencepiece.bpe.model create mode 100644 special_tokens_map.json create mode 100644 tf_model.h5 create mode 100644 tokenizer.json create mode 100644 tokenizer_config.json ``` {/if} Việc đẩy có thể mất một chút thời gian, tùy thuộc vào tốc độ kết nối internet và kích thước tệp của bạn: ```bash git push ``` ```bash Uploading LFS objects: 100% (1/1), 433 MB | 1.3 MB/s, done. Enumerating objects: 11, done. Counting objects: 100% (11/11), done. Delta compression using up to 12 threads Compressing objects: 100% (9/9), done. Writing objects: 100% (9/9), 288.27 KiB | 6.27 MiB/s, done. Total 9 (delta 1), reused 0 (delta 0), pack-reused 0 To https://huggingface.co/lysandre/dummy 891b41d..b08aab1 main -> main ``` {#if fw === 'pt'} Nếu chúng ta xem qua kho lưu trữ mô hình khi quá trình này kết thúc, chúng ta có thể thấy tất cả các tệp được thêm gần đây: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/full_model.png" alt="The 'Files and versions' tab now contains all the recently uploaded files." width="80%" /> </div> Giao diện người dùng cho phép bạn khám phá các tệp mô hình và các cam kết cũng như xem sự khác biệt được giới thiệu bởi mỗi cam kết: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/diffs.gif" alt="The diff introduced by the recent commit." width="80%"/> </div> {:else} Nếu chúng ta xem qua kho lưu trữ mô hình khi quá trình này kết thúc, chúng ta có thể thấy tất cả các tệp được thêm gần đây: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/full_model_tf.png" alt="The 'Files and versions' tab now contains all the recently uploaded files." width="80%" /> </div> Giao diện người dùng cho phép bạn khám phá các tệp mô hình và các cam kết cũng như xem sự khác biệt được giới thiệu bởi mỗi cam kết: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter4/diffstf.gif" alt="The diff introduced by the recent commit." width="80%"/> </div> {/if}
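Để tổng kết, dưới đây là một bản phác thảo nhỏ ghép các bước ở trên lại với nhau bằng thư viện `huggingface_hub` mà không cần dùng git: tạo kho lưu trữ với `create_repo`, lưu mô hình và tokenizer vào một thư mục cục bộ, rồi tải từng tệp lên bằng `upload_file`. Bản phác thảo này giả định bạn vẫn còn các đối tượng `model` và `tokenizer` đã khởi tạo ở phần trước; tên kho lưu trữ và đường dẫn ở đây chỉ mang tính minh hoạ, bạn cần thay bằng giá trị của riêng mình. Hãy nhớ rằng `upload_file` không xử lý được các tệp lớn hơn 5GB, khi đó bạn nên dùng lớp `Repository` hoặc git như đã mô tả ở trên.

```py
import os

from huggingface_hub import create_repo, upload_file

local_dir = "<path_to_dummy_folder>"  # thư mục cục bộ minh hoạ
repo_id = "<namespace>/dummy-model"  # thay bằng không gian tên của bạn

# Tạo kho lưu trữ (bỏ qua bước này nếu kho đã tồn tại)
create_repo("dummy-model")

# Lưu mô hình và tokenizer đã huấn luyện/tinh chỉnh ở các bước trước
model.save_pretrained(local_dir)
tokenizer.save_pretrained(local_dir)

# Tải từng tệp trong thư mục lên kho lưu trữ
for filename in os.listdir(local_dir):
    filepath = os.path.join(local_dir, filename)
    if os.path.isfile(filepath):
        upload_file(
            path_or_fileobj=filepath,
            path_in_repo=filename,
            repo_id=repo_id,
        )
```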
course/chapters/vi/chapter4/3.mdx/0
{ "file_path": "course/chapters/vi/chapter4/3.mdx", "repo_id": "course", "token_count": 17924 }
140
<FrameworkSwitchCourse {fw} /> # Fast tokenizers in the QA pipeline {#if fw === 'pt'} <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter6/section3b_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter6/section3b_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/vi/chapter6/section3b_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/vi/chapter6/section3b_tf.ipynb"}, ]} /> {/if} Giờ chúng ta sẽ đi sâu vào pipeline `question-answering` và xem cách tận dụng các offset để lấy câu trả lời cho các câu hỏi dựa theo từ ngữ cảnh, giống như chúng ta đã làm với các thực thể được nhóm trong phần trước. Sau đó, chúng ta sẽ xem làm thế nào có thể đối phó với những ngữ cảnh rất dài mà cuối cùng lại bị cắt bớt. Bạn có thể bỏ qua phần này nếu không quan tâm đến tác vụ hỏi đáp. {#if fw === 'pt'} <Youtube id="_wxyB3j3mk4"/> {:else} <Youtube id="b3u8RzBCX9Y"/> {/if} ## Sử dụng pipeline `question-answering` Như đã thấy trong [Chương 1](/course/chapter1), ta có thể sử dụng pipeline `question-answering` như sau để nhận được câu trả lời cho câu hỏi: ```py from transformers import pipeline question_answerer = pipeline("question-answering") context = """ 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch, and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question = "Which deep learning libraries back 🤗 Transformers?" question_answerer(question=question, context=context) ``` ```python out {'score': 0.97773, 'start': 78, 'end': 105, 'answer': 'Jax, PyTorch and TensorFlow'} ``` Không như các pipeline khác không thể cắt gọn và chia văn bản dài hơn độ dài tối đa cho phép của mô hình (dẫn đến bỏ lỡ những thông tin ở phần cuối văn bản), pipeline này có thể xử lý tốt với những ngữ cảnh dài và sẽ trả về câu trả lời kể cả khi nó nằm ở cuối văn bản: ```py long_context = """ 🤗 Transformers: State of the Art NLP 🤗 Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. 🤗 Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. 
- Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internals are exposed as consistently as possible. - Model files can be used independently of the library for quick experiments. 🤗 Transformers is backed by the three most popular deep learning libraries — Jax, PyTorch and TensorFlow — with a seamless integration between them. It's straightforward to train your models with one before loading them for inference with the other. """ question_answerer(question=question, context=long_context) ``` ```python out {'score': 0.97149, 'start': 1892, 'end': 1919, 'answer': 'Jax, PyTorch and TensorFlow'} ``` Hãy cùng nhau xem nó làm thế nào! ## Sử dụng mô hình cho tác vụ hỏi đáp Như những pipeline khác, ta sẽ bắt đầu với việc tokenize đầu vào và sau đó truyền chúng vào trong mô hình. Mặc định checkpoint được sử dụng cho pipeline `question-answering` là [`distilbert-base-cased-distilled-squad`](https://huggingface.co/distilbert-base-cased-distilled-squad) ( "squad" trong tên bắt nguồn từ bộ dữ liệu mà mô hình sử dụng để tinh chỉnh; ta sẽ nói sâu hơn về bộ dữ liệu SQuAD này ở [Chương 7](/course/chapter7/7)): {#if fw === 'pt'} ```py from transformers import AutoTokenizer, AutoModelForQuestionAnswering model_checkpoint = "distilbert-base-cased-distilled-squad" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint) inputs = tokenizer(question, context, return_tensors="pt") outputs = model(**inputs) ``` {:else} ```py from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering model_checkpoint = "distilbert-base-cased-distilled-squad" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint) inputs = tokenizer(question, context, return_tensors="tf") outputs = model(**inputs) ``` {/if} Lưu ý rằng chúng ta tokenize câu hỏi và ngữ cảnh như một cặp, với câu hỏi đứng trước. <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/question_tokens.svg" alt="An example of tokenization of question and context"/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/question_tokens-dark.svg" alt="An example of tokenization of question and context"/> </div> Các mô hình hỏi đáp hoạt động hơi khác so với các mô hình mà ta đã thấy cho đến nay. Sử dụng hình trên làm ví dụ, mô hình đã được huấn luyện để dự đoán chỉ mục của token bắt đầu câu trả lời (ở đây là 21) và chỉ mục của token nơi câu trả lời kết thúc (ở đây là 24). Đây là lý do tại sao các mô hình đó không trả về một tensor logit mà là hai: một cho các logit tương ứng với token bắt đầu của câu trả lời và một cho các các logit tương ứng với token kết thúc của câu trả lời. 
Vì trong trường hợp này, chúng ta chỉ có một đầu vào chứa 66 token, ta nhận được: ```py start_logits = outputs.start_logits end_logits = outputs.end_logits print(start_logits.shape, end_logits.shape) ``` {#if fw === 'pt'} ```python out torch.Size([1, 66]) torch.Size([1, 66]) ``` {:else} ```python out (1, 66) (1, 66) ``` {/if} Để chuyển đổi các logit đó thành xác suất, chúng ta sẽ áp dụng một hàm softmax - nhưng trước đó, chúng ta cần đảm bảo rằng chúng ta che dấu các chỉ mục không phải là một phần của ngữ cảnh. Đầu vào của chúng tôi là `[CLS] question [SEP] context [SEP]`, vì vậy chúng ta cần che dấu các token của câu hỏi cũng như token `[SEP]`. Tuy nhiên, chúng ta sẽ giữ token `[CLS]` vì một số mô hình sử dụng nó để chỉ ra rằng câu trả lời không nằm trong ngữ cảnh. Vì chúng ta sẽ áp dụng softmax sau đó, chúng ta chỉ cần thay thế các logit muốn che bằng một số âm lớn. Ở đây, chúng ta sử dụng `-10000`: {#if fw === 'pt'} ```py import torch sequence_ids = inputs.sequence_ids() # Che tất cả mọi thứ trừ token của ngữ cảnh mask = [i != 1 for i in sequence_ids] # Hiển thị token [CLS] mask[0] = False mask = torch.tensor(mask)[None] start_logits[mask] = -10000 end_logits[mask] = -10000 ``` {:else} ```py import tensorflow as tf sequence_ids = inputs.sequence_ids() # Che tất cả mọi thứ trừ token của ngữ cảnh mask = [i != 1 for i in sequence_ids] # Hiển thị token [CLS] mask[0] = False mask = tf.constant(mask)[None] start_logits = tf.where(mask, -10000, start_logits) end_logits = tf.where(mask, -10000, end_logits) ``` {/if} Giờ chúng ta đã che các logit tương ứng với các vị trí mà chúng ta không muốn dự đoán, chúng ta có thể áp dụng softmax: {#if fw === 'pt'} ```py start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1)[0] end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1)[0] ``` {:else} ```py start_probabilities = tf.math.softmax(start_logits, axis=-1)[0].numpy() end_probabilities = tf.math.softmax(end_logits, axis=-1)[0].numpy() ``` {/if} Ở giai đoạn này, chúng ta có thể lấy argmax xác suất bắt đầu và kết thúc - nhưng chúng ta có thể kết thúc với chỉ mục bắt đầu lớn hơn kết thúc, vì vậy chúng ta cần thực hiện thêm một số biện pháp phòng ngừa. Chúng ta sẽ tính toán xác suất của từng `start_index` và `end_index` có thể trong đó `start_index <= end_index`, sau đó lấy `(start_index, end_index)` với xác suất cao nhất. Giả sử các sự kiện "Câu trả lời bắt đầu ở `start_index`" và "Câu trả lời kết thúc ở `end_index`" là độc lập, xác suất để câu trả lời bắt đầu tại `start_index` và kết thúc tại `end_index` là: $$\mathrm{start\_probabilities}[\mathrm{start\_index}] \times \mathrm{end\_probabilities}[\mathrm{end\_index}]$$ Vì vậy, để tính tất cả các điểm, chúng ta chỉ cần tính tích \\(\mathrm{start\_probabilities}[\mathrm{start\_index}] \times \mathrm{end\_probabilities}[\mathrm{end\_index}]\\) với `start_index <= end_index`. Đầu tiên, hãy tính toán tất cả các đầu ra có thể có: ```py scores = start_probabilities[:, None] * end_probabilities[None, :] ``` {#if fw === 'pt'} Sau đó, chúng tôi sẽ che các giá trị trong đó `start_index > end_index` bằng cách đặt chúng thành `0` (các xác suất khác đều là số dương). Hàm `torch.triu()` trả về phần tam giác phía trên của tensor 2D được truyền dưới dạng tham số, vì vậy nó sẽ thực hiện việc che đó cho chúng ta: ```py scores = torch.triu(scores) ``` {:else} Sau đó, chúng tôi sẽ che các giá trị trong đó `start_index > end_index` bằng cách đặt chúng thành `0` (các xác suất khác đều là số dương). 
Hàm `np.triu()` trả về phần tam giác phía trên của tensor 2D được truyền dưới dạng tham số, vì vậy nó sẽ thực hiện việc che đó cho chúng ta: ```py import numpy as np scores = np.triu(scores) ``` {/if} Bây giờ chúng ta chỉ cần lấy chỉ mục tối đa. Vì PyTorch sẽ trả về chỉ mục trong tensor phẳng, chúng ta cần sử dụng phép chia làm tròn xuống `//` và lấy dư `%` để nhận được `start_index` và `end_index`: ```py max_index = scores.argmax().item() start_index = max_index // scores.shape[1] end_index = max_index % scores.shape[1] print(scores[start_index, end_index]) ``` Chúng ta chưa xong đâu, nhưng ít nhất chúng ta đã có điểm chính xác cho câu trả lời (bạn có thể kiểm tra điều này bằng cách so sánh nó với kết quả đầu tiên trong phần trước): ```python out 0.97773 ``` <Tip> ✏️ **Thử nghiệm thôi!** Tính chỉ mục bắt đầu và kết thúc cho năm cấu trả lời đầu tiện. </Tip> Ta có `start_index` và `end_index` của câu trả lời theo token nên ta chỉ cần chuyển đổi các chỉ mục kí tự trong ngữ cảnh. Đấy là nơi offset sẽ cực kì hữu ích. Ta có thể lấy và sử dụng chúng như cách ta làm trong tác vụ phân loại token: ```py inputs_with_offsets = tokenizer(question, context, return_offsets_mapping=True) offsets = inputs_with_offsets["offset_mapping"] start_char, _ = offsets[start_index] _, end_char = offsets[end_index] answer = context[start_char:end_char] ``` Bây giờ chúng ta chỉ cần định dạng mọi thứ để có được kết quả: ```py result = { "answer": answer, "start": start_char, "end": end_char, "score": scores[start_index, end_index], } print(result) ``` ```python out {'answer': 'Jax, PyTorch and TensorFlow', 'start': 78, 'end': 105, 'score': 0.97773} ``` Tuyệt quá! Kết quả đó giống như trong ví dụ đầu tiên của chúng ta! <Tip> ✏️ **Thử nghiệm thôi!** Sử dụng điểm tốt nhất mà bạn đã tính toán trước đó để hiển thị năm câu trả lời có khả năng nhất. Để kiểm tra kết quả của bạn, hãy quay lại đường dẫn đầu tiên và truyền vào `top_k=5` khi gọi nó. </Tip> ## Xử lý các ngữ cảnh dài Nếu chúng ta cố gắng tokenize các câu hỏi và ngữ cảnh dài ta từng lấy làm ví dụ trước đó, ta sẽ nhận được số token nhiều hơn độ dài tối da sử dụng trong pipeline `question-answering` (đó là 384): ```py inputs = tokenizer(question, long_context) print(len(inputs["input_ids"])) ``` ```python out 461 ``` Vì vậy, chúng ta sẽ cần phải cắt bớt đầu vào của mình ở độ dài tối đa đó. Có một số cách ta có thể làm điều này, nhưng chúng ta không muốn cắt ngắn câu hỏi, chỉ cắt bỏ ngữ cảnh. Vì ngữ cảnh là câu thứ hai, chúng ta sẽ sử dụng chiến lược cắt ngắn `"only_second"`. Vấn đề nảy sinh sau đó là câu trả lời cho câu hỏi có thể không nằm trong ngữ cảnh đã bị cắt ngắn. Ví dụ: ở đây, chúng ta đã chọn một câu hỏi trong đó câu trả lời nằm ở cuối ngữ cảnh và khi cắt ngắn câu trả lời đó thì câu trả lời không còn: ```py inputs = tokenizer(question, long_context, max_length=384, truncation="only_second") print(tokenizer.decode(inputs["input_ids"])) ``` ```python out """ [CLS] Which deep learning libraries back [UNK] Transformers? [SEP] [UNK] Transformers : State of the Art NLP [UNK] Transformers provides thousands of pretrained models to perform tasks on texts such as classification, information extraction, question answering, summarization, translation, text generation and more in over 100 languages. Its aim is to make cutting-edge NLP easier to use for everyone. [UNK] Transformers provides APIs to quickly download and use those pretrained models on a given text, fine-tune them on your own datasets and then share them with the community on our model hub. 
At the same time, each python module defining an architecture is fully standalone and can be modified to enable quick research experiments. Why should I use transformers? 1. Easy-to-use state-of-the-art models: - High performance on NLU and NLG tasks. - Low barrier to entry for educators and practitioners. - Few user-facing abstractions with just three classes to learn. - A unified API for using all our pretrained models. - Lower compute costs, smaller carbon footprint: 2. Researchers can share trained models instead of always retraining. - Practitioners can reduce compute time and production costs. - Dozens of architectures with over 10,000 pretrained models, some in more than 100 languages. 3. Choose the right framework for every part of a model's lifetime: - Train state-of-the-art models in 3 lines of code. - Move a single model between TF2.0/PyTorch frameworks at will. - Seamlessly pick the right framework for training, evaluation and production. 4. Easily customize a model or an example to your needs: - We provide examples for each architecture to reproduce the results published by its original authors. - Model internal [SEP] """ ``` Điều này có nghĩa là mô hình sẽ gặp khó khăn trong việc chọn ra câu trả lời chính xác. Để khắc phục điều này, pipeline hỏi đáp cho phép chúng ta chia ngữ cảnh thành các phần nhỏ hơn, chỉ định độ dài tối đa. Để đảm bảo rằng chúng ta không chia bối cảnh chính xác ở vị trí sai để có thể tìm ra câu trả lời, nó cũng bao gồm một số phần trùng lặp giữa các phần. Chúng ta có thể yêu cầu tokenizer (nhanh hoặc chậm) thực hiện việc này bằng cách thêm `return_overflowing_tokens=True` và ta có thể chỉ định sự giao thoa mà ta muốn qua than số `stride`. Đây là một ví dụ, sử dụng một câu nhỏ hơn: ```py sentence = "This sentence is not too long but we are going to split it anyway." inputs = tokenizer( sentence, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2 ) for ids in inputs["input_ids"]: print(tokenizer.decode(ids)) ``` ```python out '[CLS] This sentence is not [SEP]' '[CLS] is not too long [SEP]' '[CLS] too long but we [SEP]' '[CLS] but we are going [SEP]' '[CLS] are going to split [SEP]' '[CLS] to split it anyway [SEP]' '[CLS] it anyway. [SEP]' ``` Có thể thấy, câu đã bị chia thành các đoạn sao cho mỗi phần trong `inputs["input_ids"]` có nhiều nhất 6 token (ta sẽ cần thêm đệm để đảm bảo chúng có cùng kích thước) và sẽ có sử giao thoa của 2 token giữa các phần. Hãy cùng nhìn kĩ hơn vào kết quả tokenize: ```py print(inputs.keys()) ``` ```python out dict_keys(['input_ids', 'attention_mask', 'overflow_to_sample_mapping']) ``` Như dự đoán, ta nhận được ID đầu vào và attention mask.Ở đây, `overflow_to_sample_mapping` là một phép ánh xạ cho ta biết câu nào trong kết quả liên quan -- ta có 7 kết quả dều từ câu mà ta truyền vào tokenizer: ```py print(inputs["overflow_to_sample_mapping"]) ``` ```python out [0, 0, 0, 0, 0, 0, 0] ``` Điều này hữu ích hơn khi ta tokenize nhiều câu cùng nhau, Ví dụ: ```py sentences = [ "This sentence is not too long but we are going to split it anyway.", "This sentence is shorter but will still get split.", ] inputs = tokenizer( sentences, truncation=True, return_overflowing_tokens=True, max_length=6, stride=2 ) print(inputs["overflow_to_sample_mapping"]) ``` trả cho ta: ```python out [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1] ``` nghĩa là câu đầu tiên được chia thành 7 đoạn như phần phía trước, và 4 đoạn tiếp theo đến từ câu thứ hai. Bây giờ chúng ta hãy cùng quay trở lại ngữ cảnh dài. 
Theo mặc định, pipeline ``question-answering` sử dụng độ dài tối đa là 384, như đã đề cập trước đó và khoảng cách 128, tương ứng với cách mô hình được tinh chỉnh (bạn có thể điều chỉnh các tham số đó bằng cách truyền `max_seq_len` và `stride` khi gọi pipeline). Do đó, chúng ta sẽ sử dụng các tham số đó khi tokenize. Chúng ta cũng sẽ thêm phần đệm (để có các mẫu có cùng chiều dài, vì vậy chúng ta có thể tạo ra các tensor) cũng như yêu cầu các offset: ```py inputs = tokenizer( question, long_context, stride=128, max_length=384, padding="longest", truncation="only_second", return_overflowing_tokens=True, return_offsets_mapping=True, ) ``` Các `inputs` sẽ chứa các ID đầu vào và các attention mask mà mô hình kì vọng, cũng như offset và `overflow_to_sample_mapping` ta vừa trao đổi ở trên. Vì hai tham số đó không phải là tham số được sử dụng bởi mô hình, chúng ta sẽ đưa chúng ra khỏi `inputs` (và không lưu trữ ánh xạ, vì nó không hữu ích ở đây) trước khi chuyển đổi nó thành tensor: {#if fw === 'pt'} ```py _ = inputs.pop("overflow_to_sample_mapping") offsets = inputs.pop("offset_mapping") inputs = inputs.convert_to_tensors("pt") print(inputs["input_ids"].shape) ``` ```python out torch.Size([2, 384]) ``` {:else} ```py _ = inputs.pop("overflow_to_sample_mapping") offsets = inputs.pop("offset_mapping") inputs = inputs.convert_to_tensors("tf") print(inputs["input_ids"].shape) ``` ```python out (2, 384) ``` {/if} Bối cảnh dài của chúng ta được chia làm hai, đồng nghĩa sau khi nó đi qua mô hình, chúng ta sẽ có hai bộ logit bắt đầu và kết thúc: ```py outputs = model(**inputs) start_logits = outputs.start_logits end_logits = outputs.end_logits print(start_logits.shape, end_logits.shape) ``` {#if fw === 'pt'} ```python out torch.Size([2, 384]) torch.Size([2, 384]) ``` {:else} ```python out (2, 384) (2, 384) ``` {/if} Giống như trước đây, đầu tiên chúng ta che các token không phải là một phần của ngữ cảnh trước khi sử dụng softmax. Chúng ta cũng che tất cả các token đệm (được gắn mác bởi attention mask): {#if fw === 'pt'} ```py sequence_ids = inputs.sequence_ids() # Che tất cả mọi thứ trừ token của ngữ cảnh mask = [i != 1 for i in sequence_ids] # Hiển thị token [CLS] mask[0] = False # Che tất cả token [PAD] mask = torch.logical_or(torch.tensor(mask)[None], (inputs["attention_mask"] == 0)) start_logits[mask] = -10000 end_logits[mask] = -10000 ``` {:else} ```py sequence_ids = inputs.sequence_ids() # Che tất cả mọi thứ trừ token của ngữ cảnh mask = [i != 1 for i in sequence_ids] # Hiển thị token [CLS] mask[0] = False # Che tất cả token [PAD] mask = tf.math.logical_or(tf.constant(mask)[None], inputs["attention_mask"] == 0) start_logits = tf.where(mask, -10000, start_logits) end_logits = tf.where(mask, -10000, end_logits) ``` {/if} Sau đó, chúng ta có thể sử dụng softmax để chuyển đổi các logit của chúng ta thành xác suất: {#if fw === 'pt'} ```py start_probabilities = torch.nn.functional.softmax(start_logits, dim=-1) end_probabilities = torch.nn.functional.softmax(end_logits, dim=-1) ``` {:else} ```py start_probabilities = tf.math.softmax(start_logits, axis=-1).numpy() end_probabilities = tf.math.softmax(end_logits, axis=-1).numpy() ``` {/if} Bước tiếp theo tương tự như những gì chúng ta đã làm cho bối cảnh nhỏ, nhưng chúng ta lặp lại nó cho mỗi phần trong hai phần của mình. 
Chúng ta tính điểm cho tất cả các khoảng câu trả lời có thể có, sau đó lấy phần có điểm tốt nhất: {#if fw === 'pt'} ```py candidates = [] for start_probs, end_probs in zip(start_probabilities, end_probabilities): scores = start_probs[:, None] * end_probs[None, :] idx = torch.triu(scores).argmax().item() start_idx = idx // scores.shape[1] end_idx = idx % scores.shape[1] score = scores[start_idx, end_idx].item() candidates.append((start_idx, end_idx, score)) print(candidates) ``` {:else} ```py candidates = [] for start_probs, end_probs in zip(start_probabilities, end_probabilities): scores = start_probs[:, None] * end_probs[None, :] idx = np.triu(scores).argmax().item() start_idx = idx // scores.shape[1] end_idx = idx % scores.shape[1] score = scores[start_idx, end_idx].item() candidates.append((start_idx, end_idx, score)) print(candidates) ``` {/if} ```python out [(0, 18, 0.33867), (173, 184, 0.97149)] ``` Hai ứng cử viên đó tương ứng với các câu trả lời tốt nhất mà mô hình có thể tìm thấy trong mỗi đoạn. Mô hình chắc chắn hơn rằng câu trả lời đúng nằm ở phần thứ hai (đó là một dấu hiệu tốt!). Bây giờ chúng ta chỉ cần ánh xạ khoảng hai token đó với khoảng các ký tự trong ngữ cảnh (chúng ta chỉ cần lập ánh xạ cái thứ hai để có câu trả lời, nhưng thật thú vị khi xem mô hình đã chọn những gì trong đoạn đầu tiên). <Tip> ✏️ **Thử nghiệm thôi!** Hãy điều chỉnh đoạn mã trên để trả về điểm và khoảng cho năm câu trả lời có nhiều khả năng nhất (tổng cộng, không phải cho mỗi đoạn). </Tip> `offsets` mà chúng ta đã nắm được trước đó thực sự là một danh sách các offset, với một danh sách trên mỗi đoạn văn bản: ```py for candidate, offset in zip(candidates, offsets): start_token, end_token, score = candidate start_char, _ = offset[start_token] _, end_char = offset[end_token] answer = long_context[start_char:end_char] result = {"answer": answer, "start": start_char, "end": end_char, "score": score} print(result) ``` ```python out {'answer': '\n🤗 Transformers: State of the Art NLP', 'start': 0, 'end': 37, 'score': 0.33867} {'answer': 'Jax, PyTorch and TensorFlow', 'start': 1892, 'end': 1919, 'score': 0.97149} ``` Nếu chúng ta bỏ qua kết quả đầu tiên, chúng ta sẽ nhận được kết quả tương tự như pipeline cho ngữ cảnh dài này - yayy! <Tip> ✏️ **Thử nghiệm thôi!** Sử dụng điểm tốt nhất bạn đã tính toán trước đó để hiển thị năm câu trả lời có khả năng xảy ra nhất (cho toàn bộ ngữ cảnh, không phải từng đoạn). Để kiểm tra kết quả của bạn, hãy quay lại pipeline đầu tiên và truyền vào `top_k=5` khi gọi nó. </Tip> Điều này kết thúc phần đi sâu vào các khả năng của tokenizer. Chúng ta sẽ đưa tất cả những điều này vào thực tế một lần nữa trong chương tiếp theo, khi chúng tôi hướng dẫn bạn cách tinh chỉnh một mô hình về một loạt các tác vụ NLP phổ biến.
course/chapters/vi/chapter6/3b.mdx/0
{ "file_path": "course/chapters/vi/chapter6/3b.mdx", "repo_id": "course", "token_count": 13374 }
141
# Giới thiệu Bây giờ bạn đã biết cách giải quyết các tác vụ NLP phổ biến nhất với 🤗 Transformers, bạn sẽ có thể bắt đầu các dự án của riêng mình! Trong chương này, chúng ta sẽ khám phá những việc cần làm khi bạn gặp sự cố. Bạn sẽ học cách gỡ lỗi thành công mã hoặc quá trình huấn luyện của mình và cách yêu cầu cộng đồng trợ giúp nếu bạn không thể tự mình giải quyết vấn đề. Và nếu bạn cho rằng mình đã tìm thấy lỗi trong một trong các thư viện Hugging Faces, chúng tôi sẽ chỉ cho bạn cách tốt nhất để báo cáo lỗi đó để sự cố được giải quyết nhanh nhất có thể. Chính xác hơn, trong chương này, bạn sẽ học: - Điều đầu tiên cần làm khi bạn gặp lỗi - Cách yêu cầu trợ giúp trên [diễn đàn](https://discuss.huggingface.co/) - Cách gỡ lỗi đường dẫn huấn luyện của bạn - Làm thế nào để viết một vấn đề tốt Tất nhiên, không điều gì trong số này liên quan cụ thể đến 🤗 Transformers hoặc hệ sinh thái Hugging Face; các bài học từ chương này có thể áp dụng cho hầu hết các dự án nguồn mở!
course/chapters/vi/chapter8/1.mdx/0
{ "file_path": "course/chapters/vi/chapter8/1.mdx", "repo_id": "course", "token_count": 887 }
142
<!-- DISABLE-FRONTMATTER-SECTIONS --> # Đố vui cuối chương Hãy kiểm tra những gì bạn đã học được trong chương này! ### 1. Bạn có thể sử dụng Gradio để làm gì? <Question choices={[ { text: "Tạo bản demo cho mô hình học máy của bạn", explain: "Với một vài dòng mã python, bạn có thể tạo bản demo cho mô hình ML của mình bằng cách sử dụng thư viện các thành phần được tạo sẵn của chúng tôi.", correct: true }, { text: "Chia sẻ mô hình học máy của bạn với những người khác", explain: "Sử dụng tham số <code>share=True</code> trong phương thức khởi chạy, bạn có thể tạo liên kết chia sẻ để gửi cho bất kỳ ai.", correct: true }, { text: "Gỡ lỗi mô hình của bạn", explain: "Một lợi thế của bản demo gradio là có thể kiểm tra mô hình của bạn với dữ liệu thực mà bạn có thể thay đổi và quan sát sự thay đổi dự đoán của mô hình trong thời gian thực, giúp bạn gỡ lỗi mô hình của mình.", correct: true }, { text: "Huấn luyện mô hình của bạn", explain: "Gradio được thiết kể để sử dụng cho việc luận suy mô hình, SAU KHI mô hình của bạn đã được huấn luyện.", } ]} /> ### 2. Gradio CHỈ hoạt động với các mô hình PyTorch <Question choices={[ { text: "Đúng", explain: "Gradio hoạt động với các mô hình PyTorch, nhưng cũng hoạt động với bất kỳ loại mô hình học máy nào!" }, { text: "Sai", explain: "Gradio là mô hình bất khả tri, có nghĩa là bạn có thể tạo bản demo cho bất kỳ loại mô hình học máy nào.", correct: true } ]} /> ### 3. Bạn có thể khởi chạy bản demo Gradio từ đâu? <Question choices={[ { text: "IDE python chuẩn", explain: "Gradio hoạt động hiệu quả với IDE yêu thích của bạn.", correct: true }, { text: "Google Colab notebooks", explain: "Bạn có thể tạo và khởi chạy bản trình diễn trong notebook Google colab của mình.", correct: true }, { text: "Jupyter notebooks", explain: "Lựa chọn tốt - Bạn có thể tạo và khởi chạy bản demo trong Jupyter notebook của mình.", correct: true } ]} /> ### 4. Gradio được thiết kế chủ yếu cho các mô hình NLP <Question choices={[ { text: "Đúng", explain: "Gradio hoạt động với khá nhiều kiểu dữ liệu, không chỉ NLP." }, { text: "Sai", explain: "Gradio cung cấp cho các nhà phát triển thư viện các thành phần được tạo sẵn cho hầu hết các loại dữ liệu.", correct: true } ]} /> ### 5. Tính năng nào sau đây được hỗ trợ bởi Gradio? <Question choices={[ { text: "Nhiều đầu vào và đầu ra", explain: "Có thể có nhiều đầu vào và đầu ra với gradio. Tất cả những gì bạn cần làm là chuyển danh sách đầu vào và đầu ra cho các tham số tương ứng của chúng", correct: true }, { text: "Trạng thái duy trì dữ liệu", explain: "Gradio có khả năng thêm trạng thái vào giao diện của bạn.", correct: true }, { text: "Xác thực tên người dùng và mật khẩu", explain: "Chuyển danh sách tên người dùng/mật khẩu vào phương thức khởi chạy để thêm xác thực.", correct: true }, { text: "Phân tích tự động cho những ai sử dụng bản demo gradio của bạn", explain: "Hãy thử lại - Gradio không cung cấp phân tích cho các nhà phát triển về những người sử dụng bản demo của họ." }, { text: "Tải mô hình từ Model Hub của Hugging Face hoặc Hugging Face Spaces", explain: "Chắc chắn rồi - tải bất kỳ mô hình Hugging Face nào qua phương thức <code>gr.Interface.load()</code>", correct: true } ]} /> ### 6. Cách nào sau đây là cách hợp lệ để tải mô hìnhHugging Face từ Hub hoặc Spaces? 
<Question choices={[ { text: "gr.Interface.load('huggingface/{user}/{model_name}')", explain: "Đây là phương pháp hợp lệ để tải mô hình Hugging Face từ Hub", correct: true }, { text: "gr.Interface.load('model/{user}/{model_name}')", explain: "Đây là phương pháp hợp lệ để tải mô hình Hugging Face từ Hub", correct: true }, { text: "gr.Interface.load('demos/{user}/{model_name}')", explain: "Hãy thử lại - bạn không thể tải mô hình bằng cách sử dụng tiền tố 'demos'." }, { text: "gr.Interface.load('spaces/{user}/{model_name}')", explain: "Đây là phương pháp hợp lệ để tải mô hình Hugging Face từ Spaces", correct: true } ]} /> ### 7. Chọn tất cả các bước cần thiết để thêm trạng thái vào giao diện Gradio của bạn <Question choices={[ { text: "Truyền một tham số bổ sung vào hàm dự đoán của bạn, thể hiện trạng thái của giao diện.", explain: "Cần có thêm một tham số lưu trữ lịch sử hoặc trạng thái giao diện của bạn.", correct: true }, { text: "Ở cuối hàm dự đoán, hãy trả về giá trị đã cập nhật của trạng thái dưới dạng giá trị trả về bổ sung.", explain: "Giá trị lịch sử hoặc trạng thái này cần được hàm của bạn trả về.", correct: true }, { text: "Thêm thành phần đầu vào trạng thái và thành phần đầu ra trạng thái khi tạo Interface của bạn", explain: "Gradio cung cấp thành phần đầu vào và đầu ra trạng thái để duy trì dữ liệu.", correct: true } ]} /> ### 8. Những thành phần nào sau đây có trong thư viện Gradio? <Question choices={[ { text: "Textbox.", explain: "Đúng, bạn có thể tạo hộp văn bản với thành phần Hộp văn bản.", correct: true }, { text: "Graph.", explain: "Hiện không có thành phần này.", }, { text: "Image.", explain: "Đúng, bạn có thể tạo tiện ích tải lên hình ảnh bằng thành phần Image.", correct: true }, { text: "Audio.", explain: "Đúng, bạn có thể tạo tiện ích tải lên âm thanh bằng thành phần Audio.", correct: true }, ]} /> ### 9. Gradio `Blocks` cho phép bạn làm gì? <Question choices={[ { text: "Kết hợp nhiều bản demo vào một ứng dụng web", explain: "Bạn có thể sử dụng `with gradio.Tabs():` để thêm các tab cho nhiều bản demo", correct: true }, { text: "Gán các trình kích hoạt sự kiện, chẳng hạn như được nhấp/thay đổi/ v.v. cho các thành phần `Blocks`", explain: "Khi bạn gán một sự kiện, bạn truyền vào ba tham số: fn: hàm cần được gọi, inputs: (danh sách) của (các) thành phần đầu vào và outputs: (danh sách) các thành phần đầu ra nên gọi là.", correct: true }, { text: "Tự động xác định thành phần `Blocks` nào nên tương tác so với tĩnh", explain: "Dựa trên các trình kích hoạt sự kiện mà bạn xác định,`Blocks` tự động tìm ra liệu một thành phần có nên chấp nhận đầu vào của người dùng hay không.", correct: true }, { text: "Tạo bản demo đa bước; nghĩa là cho phép bạn sử dụng lại đầu ra của một thành phần làm đầu vào cho thành phần tiếp theo", explain: "Bạn có thể sử dụng một thành phần cho đầu vào của một trình kích hoạt sự kiện nhưng là đầu ra của một thành phần khác.", correct: true }, ]} /> ### 10. Bạn có thể chia sẻ liên kết công khai tới `Blocks` demo và tổ chức lưu trữ `Blocks` demo trên Hugging Face Spaces. <Question choices={[ { text: "Đúng", explain: "Cũng giống như `Interface`, tất cả các khả năng chia sẻ và lưu trữ các bản demo của `Blocks` đều giống nhau!", correct: true }, { text: "Sai", explain: "Cũng giống như `Interface`,tất cả các khả năng chia sẻ và lưu trữ các bản demo của `Blocks` đều giống nhau!", correct: false } ]} />
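Để minh họa các khái niệm về `Blocks` trong câu hỏi 9 và 10, dưới đây là một bản phác thảo nhỏ (giả định rằng bạn đã cài đặt thư viện `gradio`; tên hàm và nhãn trong ví dụ chỉ mang tính minh họa) kết hợp một tab với một trình kích hoạt sự kiện:

```py
import gradio as gr


# Hàm ví dụ: đảo ngược một chuỗi văn bản
def flip_text(x):
    return x[::-1]


with gr.Blocks() as demo:
    gr.Markdown("Bản demo nhỏ minh họa `Blocks`")
    with gr.Tab("Đảo ngược văn bản"):
        text_input = gr.Textbox(label="Đầu vào")
        text_output = gr.Textbox(label="Đầu ra")
        flip_btn = gr.Button("Đảo ngược")
    # Gán trình kích hoạt sự kiện: fn, inputs, outputs
    flip_btn.click(fn=flip_text, inputs=text_input, outputs=text_output)

demo.launch()  # thêm share=True để tạo liên kết chia sẻ công khai
```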
course/chapters/vi/chapter9/9.mdx/0
{ "file_path": "course/chapters/vi/chapter9/9.mdx", "repo_id": "course", "token_count": 5738 }
143
<FrameworkSwitchCourse {fw} /> # 模型 [[模型]] {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section3_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section3_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section3_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section3_tf.ipynb"}, ]} /> {/if} {#if fw === 'pt'} <Youtube id="AhChOFRegn4"/> {:else} <Youtube id="d3JVgghSOew"/> {/if} {#if fw === 'pt'} 在本节中,我们将更详细地了解如何创建和使用模型。我们将使用 AutoModel类,当您希望从检查点实例化任何模型时,这非常方便。 这个AutoModel类及其所有相关项实际上是对库中各种可用模型的简单包装。它是一个聪明的包装器,因为它可以自动猜测检查点的适当模型体系结构,然后用该体系结构实例化模型。 {:else} 在本节中,我们将更详细地了解如何创建和使用模型。我们将使用 AutoModel类,当您希望从检查点实例化任何模型时,这非常方便。 这个AutoModel类及其所有相关项实际上是对库中各种可用模型的简单包装。它是一个聪明的包装器,因为它可以自动猜测检查点的适当模型体系结构,然后用该体系结构实例化模型。 {/if} 但是,如果您知道要使用的模型类型,则可以使用直接定义其体系结构的类。让我们看看这是如何与BERT模型一起工作的。 ## 创建转换器 [[创建转换器]] 初始化BERT模型需要做的第一件事是加载配置对象: {#if fw === 'pt'} ```py from transformers import BertConfig, BertModel # Building the config config = BertConfig() # Building the model from the config model = BertModel(config) ``` {:else} ```py from transformers import BertConfig, TFBertModel # Building the config config = BertConfig() # Building the model from the config model = TFBertModel(config) ``` {/if} 配置包含许多用于构建模型的属性: ```py print(config) ``` ```python out BertConfig { [...] "hidden_size": 768, "intermediate_size": 3072, "max_position_embeddings": 512, "num_attention_heads": 12, "num_hidden_layers": 12, [...] } ``` 虽然您还没有看到所有这些属性都做了什么,但您应该认识到其中的一些属性:hidden_size属性定义了hidden_状态向量的大小,num_hidden_layers定义了Transformer模型的层数。 ### 不同的加载方式 [[不同的加载方式]] 从默认配置创建模型会使用随机值对其进行初始化: {#if fw === 'pt'} ```py from transformers import BertConfig, BertModel config = BertConfig() model = BertModel(config) # Model is randomly initialized! ``` {:else} ```py from transformers import BertConfig, TFBertModel config = BertConfig() model = TFBertModel(config) # Model is randomly initialized! ``` {/if} 该模型可以在这种状态下使用,但会输出胡言乱语;首先需要对其进行训练。我们可以根据手头的任务从头开始训练模型,但正如您在 [Chapter 1](/course/chapter1) ,这将需要很长的时间和大量的数据,并将产生不可忽视的环境影响。为了避免不必要的重复工作,必须能够共享和重用已经训练过的模型。 加载已经训练过的Transformers模型很简单-我们可以使用from_pretrained() 方法: {#if fw === 'pt'} ```py from transformers import BertModel model = BertModel.from_pretrained("bert-base-cased") ``` 正如您之前看到的,我们可以用等效的AutoModel类替换Bert模型。从现在开始,我们将这样做,因为这会产生检查点不可知的代码;如果您的代码适用于一个检查点,那么它应该与另一个检查点无缝地工作。即使体系结构不同,这也适用,只要检查点是针对类似任务(例如,情绪分析任务)训练的。 {:else} ```py from transformers import TFBertModel model = TFBertModel.from_pretrained("bert-base-cased") ``` 正如您之前看到的,我们可以用等效的AutoModel类替换Bert模型。从现在开始,我们将这样做,因为这会产生检查点不可知的代码;如果您的代码适用于一个检查点,那么它应该与另一个检查点无缝地工作。即使体系结构不同,这也适用,只要检查点是针对类似任务(例如,情绪分析任务)训练的。 {/if} 在上面的代码示例中,我们没有使用BertConfig ,而是通过Bert base cased标识符加载了一个预训练模型。这是一个模型检查点,由BERT的作者自己训练;您可以在 [model card](https://huggingface.co/bert-base-cased)中找到更多细节. 该模型现在使用检查点的所有权重进行初始化。它可以直接用于对训练过的任务进行推理,也可以对新任务进行微调。通过预先训练重量而不是从头开始的训练,我们可以很快取得好的效果。 权重已下载并缓存在缓存文件夹中(因此将来对from_pretrained()方法的调用将不会重新下载它们)默认为 ~/.cache/huggingface/transformers . 
您可以通过设置 HF_HOME 环境变量来自定义缓存文件夹。 用于加载模型的标识符可以是模型中心Hub上任何模型的标识符,只要它与BERT体系结构兼容。可以找到可用的BERT检查点的完整列表 [here](https://huggingface.co/models?filter=bert) . ### 保存模型 [[保存模型]] 保存模型和加载模型一样简单--我们使用 save_pretrained() 方法,类似于 from_pretrained() 方法: ```py model.save_pretrained("directory_on_my_computer") ``` 这会将两个文件保存到磁盘: {#if fw === 'pt'} ``` ls directory_on_my_computer config.json pytorch_model.bin ``` {:else} ``` ls directory_on_my_computer config.json tf_model.h5 ``` {/if} 如果你看一下 config.json 文件,您将识别构建模型体系结构所需的属性。该文件还包含一些元数据,例如检查点的来源以及上次保存检查点时使用的🤗 Transformers版本。 {#if fw === 'pt'} 这个 *pytorch_model.bin* 文件就是众所周知的*state dictionary*; 它包含模型的所有权重。这两个文件齐头并进;配置是了解模型体系结构所必需的,而模型权重是模型的参数。 {:else} 这个 *tf_model.h5* 文件就是众所周知的*state dictionary*; 它包含模型的所有权重。这两个文件齐头并进;配置是了解模型体系结构所必需的,而模型权重是模型的参数。 {/if} ### 使用Transformers模型进行推理 [[使用Transformers模型进行推理]] 既然您知道了如何加载和保存模型,那么让我们尝试使用它进行一些预测。Transformer模型只能处理数字——分词器生成的数字。但在我们讨论标记化器之前,让我们先探讨模型接受哪些输入。 标记化器可以将输入转换为适当的框架张量,但为了帮助您了解发生了什么,我们将快速了解在将输入发送到模型之前必须做什么。 假设我们有几个序列: ```py sequences = ["Hello!", "Cool.", "Nice!"] ``` 分词器将这些转换为词汇表索引,通常称为 input IDs . 每个序列现在都是一个数字列表!结果是: ```py no-format encoded_sequences = [ [101, 7592, 999, 102], [101, 4658, 1012, 102], [101, 3835, 999, 102], ] ``` 这是一个编码序列列表:一个列表列表。张量只接受矩形(想想矩阵)。此“数组”已为矩形,因此将其转换为张量很容易: {#if fw === 'pt'} ```py import torch model_inputs = torch.tensor(encoded_sequences) ``` {:else} ```py import tensorflow as tf model_inputs = tf.constant(encoded_sequences) ``` {/if} ### 使用张量作为模型的输入 [[使用张量作为模型的输入]] 在模型中使用张量非常简单——我们只需用这些输入来调用模型: ```python output = model(model_inputs) ``` 虽然模型接受许多不同的参数,但只需要 input IDs。我们稍后将解释其他参数的作用以及何时需要它们,但首先我们需要更仔细地了解生成 Transformer 模型可以理解的输入的标记器(tokenizer)。
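作为补充,下面是一个简单的示意性示例(假设沿用上文通过 `from_pretrained("bert-base-cased")` 加载的 PyTorch 模型和 `model_inputs` 张量;输出形状的注释基于该检查点的默认配置),展示如何查看模型输出的隐藏状态形状:

```py
import torch
from transformers import BertModel

# 沿用上文的编码序列(input IDs)
encoded_sequences = [
    [101, 7592, 999, 102],
    [101, 4658, 1012, 102],
    [101, 3835, 999, 102],
]
model_inputs = torch.tensor(encoded_sequences)

model = BertModel.from_pretrained("bert-base-cased")

# 推理时不需要梯度,使用 torch.no_grad() 可以节省内存
with torch.no_grad():
    output = model(model_inputs)

# 输出包含每个词符的隐藏状态,形状为 (批量大小, 序列长度, 隐藏层大小)
print(output.last_hidden_state.shape)  # torch.Size([3, 4, 768])
```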
course/chapters/zh-CN/chapter2/3.mdx/0
{ "file_path": "course/chapters/zh-CN/chapter2/3.mdx", "repo_id": "course", "token_count": 5205 }
144
# 构建模型卡片 [[构建模型卡片]] <CourseFloatingBanner chapter={4} classNames="absolute z-10 right-0 top-0" /> 模型卡片是一个配置文件,可以说与模型存储库中的模型和 tokenizer 文件一样重要。它包含了模型的核心定义,确保了社区成员可以复现模型的结果,并提供一个其他成员可以在这个模型基础上构建他们的组件的平台。 记录训练和评估过程并提供有关使用的数据以及已完成的预处理和后续处理的足够信息,有助于其他人了解对模型的能力——确保模型存在和目前的限制、偏差可以识别和理解。 因此,创建清晰定义模型的模型卡片是非常重要的一步。在这里,我们提供了一些可以帮助您解决此问题的方法。创建模型卡片是通过您之前看到的 Markdown 文件:README.md 。 “模型卡片”的概念源于谷歌的一个研究方向, Margaret Mitchell 等人在论文[“Model Cards for Model Reporting”](https://arxiv.org/abs/1810.03993)中首次提出,此处包含的许多信息均基于该论文,我们建议您查看这篇论文以了解为什么模型卡片在重视可重复性、可重用性和公平性的时候中如此重要。 模型卡通常以非常简短的概述开始,说明模型的用途,然后是模型卡片需要的其他信息: - 模型描述 - 预期用途和限制 - 如何使用 - 局限性和偏见 - 训练数据 - 训练程序 - 评价结果 让我们来看看每个部分应该包含什么。 ### 模型描述: [[模型描述:]] 提供了有关模型的基本详细信息。这包括架构、版本、如果它是在论文中介绍的,是否有原始的实现可用?作者以及有关模型的一般信息、任何版权都应归于此处。这一部分还可以提及有关训练程序、参数和重要免责声明的一般信息。 ### 预期用途和限制: [[预期用途和限制:]] 在此描述模型可以适用的例子,包括可以应用它的语言、领域。模型卡的这一部分还可以记录已知超出模型范围的区域,或者可能表现不佳的区域。 ### 使用方法: [[使用方法:]] 此部分应包括一些有关如何使用模型的示例。这可以展示使用 **pipeline()** 函数、模型和标记器类的使用以及其他任何您认为可能有帮助的代码。 ### 训练数据: [[训练数据:]] 这部分应该指出模型是在哪个数据集上训练的。也欢迎对数据集进行简要描述。 ### 训练过程: [[训练过程:]] 此部分中,您应该描述从再现性角度来看有用的训练的所有相关方面。这包括对数据进行的任何预处理和后处理,以及模型训练的批量数、批量大小、学习率等细节。 ### 变量和指标: [[变量和指标:]] 在这里,您应该描述您用于评估的指标,以及您测量的不同因素。提及使用了哪些指标、在哪个数据集上以及哪个数据集部分,可以轻松地将您的模型的性能与其他模型的性能进行比较。 ### 评价结果: [[评价结果:]] 这些应该提前在前面的部分告知,例如预期的使用效果和示例。最后,提供模型在评估数据集上的表现的指示。如果模型使用决策阈值,要么提供评估中使用的决策阈值,要么提供在不同阈值下针对预期用途进行评估的详细信息。 ## 例子 [[例子]] 查看以下几个精心制作的模型卡的例子: * [bert-base-cased](https://huggingface.co/bert-base-cased) * [gpt2](https://huggingface.co/gpt2) * [distilbert](https://huggingface.co/distilbert-base-uncased) 更多来自于不同组织和公司的示例可以在[这里](https://github.com/huggingface/model_card/blob/master/examples.md)查阅. ## 提示 [[提示]] 发布模型时不需要模型卡,制作一个模型时不需要包含上述所有部分。但是,模型的文档会使未来的用户受益,因此我们建议您尽自己的知识和能力填写尽可能多的部分。 ## 模型卡片元数据 [[模型卡片元数据]] 如果您对 Hugging Face Hub 进行了一些探索,您应该已经看到某些模型属于某些类别:您可以按任务、语言、库等对其进行过滤。模型所属的类别来自于您在模型卡片标题中添加的元数据。 例如,如果你看一下[`camembert-base` 模型卡片](https://huggingface.co/camembert-base/blob/main/README.md),您应该在模型卡标题中看到以下几行: ``` --- language: fr license: mit datasets: - oscar --- ``` 该元数据由 Hugging Face Hub 解析,然后将这个模型识别为法语模型,拥有 MIT 许可证,在 Oscar 数据集上训练。 允许的指定语言、许可证、标签、数据集、指标以及模型在训练时获得的评估结果在[全部模型卡片的规格](https://raw.githubusercontent.com/huggingface/huggingface_hub/main/modelcard.md)可以查阅。
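作为参考,下面是一个示意性的模型卡片元数据示例(其中与具体模型相关的取值均为假设,仅用于说明格式;实际可用的字段和取值请以上文链接的模型卡片规格为准):

```
---
language: zh
license: apache-2.0
tags:
- text-classification
datasets:
- amazon_reviews_multi
metrics:
- accuracy
- f1
---
```

把这段元数据放在 README.md 的最顶部,Hub 就能据此为模型生成对应的标签和过滤类别。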
course/chapters/zh-CN/chapter4/4.mdx/0
{ "file_path": "course/chapters/zh-CN/chapter4/4.mdx", "repo_id": "course", "token_count": 3404 }
145
# 标准化和预标记化 [[标准化和预标记化]] <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section4.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section4.ipynb"}, ]} /> 在我们更深入地研究与 Transformer 模型(字节对编码 [BPE]、WordPiece 和 Unigram)一起使用的三种最常见的子词标记化算法之前,我们将首先看一下每个标记器应用于文本的预处理。以下是标记化管道中步骤的高级概述: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/tokenization_pipeline.svg" alt="The tokenization pipeline."> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter6/tokenization_pipeline-dark.svg" alt="The tokenization pipeline."> </div> 在将文本拆分为子标记之前(根据其模型),分词器执行两个步骤: _normalization_ 和 _pre-tokenization_. ## 正常化 [[正常化]] <Youtube id="4IIC2jI9CaU"/> 标准化步骤涉及一些常规清理,例如删除不必要的空格、小写和/或删除重音符号。如果你熟悉[Unicode normalization](http://www.unicode.org/reports/tr15/)(例如 NFC 或 NFKC),这也是 tokenizer 可能应用的东西。 🤗Transformers **tokenizer** 有一个属性叫做 **backend_tokenizer** 它提供了对 🤗 Tokenizers 库中底层标记器的访问: ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased") print(type(tokenizer.backend_tokenizer)) ``` ```python out <class 'tokenizers.Tokenizer'> ``` **normalizer** 的属性 **tokenizer** 对象有一个 **normalize_str()** 我们可以用来查看标准化是如何执行的方法: ```py print(tokenizer.backend_tokenizer.normalizer.normalize_str("Héllò hôw are ü?")) ``` ```python out 'hello how are u?' ``` 在这个例子中,因为我们选择了 **bert-base-uncased** 检查点,标准化应用小写并删除重音。 <Tip> ✏️ **试试看!** 从检查点加载标记器并将相同的示例传递给它。您可以看到分词器的带壳和无壳版本之间的主要区别是什么? </Tip> ## 预标记化 [[预标记化]] <Youtube id="grlLV8AIXug"/> 正如我们将在下一节中看到的,分词器不能单独在原始文本上进行训练。相反,我们首先需要将文本拆分为小实体,例如单词。这就是预标记化步骤的用武之地。 正如我们在[Chapter 2](/course/chapter2), 基于单词的标记器可以简单地将原始文本拆分为空白和标点符号的单词。这些词将是分词器在训练期间可以学习的子标记的边界。 要查看快速分词器如何执行预分词,我们可以使用 **pre_tokenize_str()** 的方法 **pre_tokenizer** 的属性 **tokenizer** 目的: ```py tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?") ``` ```python out [('Hello', (0, 5)), (',', (5, 6)), ('how', (7, 10)), ('are', (11, 14)), ('you', (16, 19)), ('?', (19, 20))] ``` 请注意分词器如何已经跟踪偏移量,这就是它如何为我们提供上一节中使用的偏移量映射。这里分词器忽略了这两个空格,只用一个替换它们,但偏移量在 **are** 和 **you** 考虑到这一点。 由于我们使用的是 BERT 分词器,预分词涉及对空格和标点符号进行拆分。对于这一步,其他标记器可以有不同的规则。例如,如果我们使用 GPT-2 标记器: ```py tokenizer = AutoTokenizer.from_pretrained("gpt2") tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?") ``` 它也会在空格和标点符号上拆分,但它会保留空格并将它们替换为 **Ġ** 符号,如果我们解码令牌,则使其能够恢复原始空格: ```python out [('Hello', (0, 5)), (',', (5, 6)), ('Ġhow', (6, 10)), ('Ġare', (10, 14)), ('Ġ', (14, 15)), ('Ġyou', (15, 19)), ('?', (19, 20))] ``` 另请注意,与 BERT 分词器不同,此分词器不会忽略双空格 最后一个例子,让我们看一下基于 SentencePiece 算法的 T5 分词器: ```py tokenizer = AutoTokenizer.from_pretrained("t5-small") tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?") ``` ```python out [('▁Hello,', (0, 6)), ('▁how', (7, 10)), ('▁are', (11, 14)), ('▁you?', (16, 20))] ``` 与 GPT-2 标记器一样,这个标记器保留空格并用特定标记替换它们( **_** ),但 T5 分词器只在空格上拆分,而不是标点符号。还要注意,它默认在句子的开头添加了一个空格(之前 **Hello** ) 并忽略了之间的双空格 **are** 和 **you** . 
现在我们已经了解了一些不同的标记器如何处理文本,我们可以开始探索底层算法本身。我们首先快速浏览一下广泛适用的 SentencePiece;然后,在接下来的三个部分中,我们将研究用于子词标记化的三种主要算法是如何工作的。 ## SentencePiece [[SentencePiece]] [SentencePiece](https://github.com/google/sentencepiece) 是一种用于文本预处理的标记化算法,您可以将其与我们将在接下来的三个部分中看到的任何模型一起使用。它将文本视为 Unicode 字符序列,并用特殊字符 **▁** 替换空格。与 Unigram 算法结合使用(参见[section 7](/course/chapter7/7)),它甚至不需要预标记化步骤,这对于不使用空格字符的语言(如中文或日语)非常有用。 SentencePiece 的另一个主要特点是可逆标记化:由于没有对空格进行特殊处理,因此只需将各个词符连接起来并把 **▁** 替换回空格,就能还原出标准化后的文本。正如我们之前看到的,BERT 分词器删除了重复的空格,因此它的分词是不可逆的。 ## 算法概述 [[算法概述]] 在下面的部分中,我们将深入研究三种主要的子词标记化算法:BPE(由 GPT-2 和其他人使用)、WordPiece(例如由 BERT 使用)和 Unigram(由 T5 和其他人使用)。在我们开始之前,这里是它们各自工作原理的快速概述。如果您还没有理解,请在阅读下一节后立即回到此表。 Model | BPE | WordPiece | Unigram :----:|:---:|:---------:|:------: Training | Starts from a small vocabulary and learns rules to merge tokens | Starts from a small vocabulary and learns rules to merge tokens | Starts from a large vocabulary and learns rules to remove tokens Training step | Merges the tokens corresponding to the most common pair | Merges the tokens corresponding to the pair with the best score based on the frequency of the pair, privileging pairs where each individual token is less frequent | Removes all the tokens in the vocabulary that will minimize the loss computed on the whole corpus Learns | Merge rules and a vocabulary | Just a vocabulary | A vocabulary with a score for each token Encoding | Splits a word into characters and applies the merges learned during training | Finds the longest subword starting from the beginning that is in the vocabulary, then does the same for the rest of the word | Finds the most likely split into tokens, using the scores learned during training 现在让我们深入了解 BPE!
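在进入 BPE 之前,这里还有一个简单的补充示例(基于 🤗 Tokenizers 库,假设你已经安装了它),展示如何自己组合标准化和预标记化组件,以便与上文通过 `backend_tokenizer` 看到的组件对应起来:

```py
from tokenizers import normalizers, pre_tokenizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents
from tokenizers.pre_tokenizers import Whitespace

# 组合一个标准化器:Unicode 分解、小写化、去除重音
normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
print(normalizer.normalize_str("Héllò hôw are ü?"))
# 'hello how are u?'

# 一个简单的预标记器:按空格和标点拆分
pre_tokenizer = Whitespace()
print(pre_tokenizer.pre_tokenize_str("Hello, how are you?"))
# [('Hello', (0, 5)), (',', (5, 6)), ('how', (7, 10)), ('are', (11, 14)), ('you', (16, 19)), ('?', (19, 20))]
```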
course/chapters/zh-CN/chapter6/4.mdx/0
{ "file_path": "course/chapters/zh-CN/chapter6/4.mdx", "repo_id": "course", "token_count": 4034 }
146
# 出现错误时该怎么办 [[出现错误时该怎么办]] <DocNotebookDropdown classNames="absolute z-10 right-0 top-0" options={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/chapter8/section2.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/chapter8/section2.ipynb"}, ]} /> 在本节中, 我们将研究当你尝试从新调整的 Transformer 模型生成预测时可能发生的一些常见错误。这将为 [第四节](/course/chapter8/section4) 做准备, 我们将探索如何调试训练阶段本身。 <Youtube id="DQ-CpJn6Rc4"/> 我们为这一节准备了一个 [模板模型库](https://huggingface.co/lewtun/distilbert-base-uncased-finetuned-squad-d5716d28), 如果你想运行本章中的代码, 你首先需要将模型复制到你的 [Hugging Face Hub](https://huggingface.co) 账号。为此, 首先通过在 Jupyter 笔记本中运行以下任一命令来登录: ```python from huggingface_hub import notebook_login notebook_login() ``` 或在你最喜欢的终端中执行以下操作: ```bash huggingface-cli login ``` 这将提示你输入用户名和密码, 并将在下面保存一个令牌 *~/.cache/huggingface/*。登录后, 你可以使用以下功能复制模板存储库: ```python from distutils.dir_util import copy_tree from huggingface_hub import Repository, snapshot_download, create_repo, get_full_repo_name def copy_repository_template(): # Clone the repo and extract the local path template_repo_id = "lewtun/distilbert-base-uncased-finetuned-squad-d5716d28" commit_hash = "be3eaffc28669d7932492681cd5f3e8905e358b4" template_repo_dir = snapshot_download(template_repo_id, revision=commit_hash) # Create an empty repo on the Hub model_name = template_repo_id.split("/")[1] create_repo(model_name, exist_ok=True) # Clone the empty repo new_repo_id = get_full_repo_name(model_name) new_repo_dir = model_name repo = Repository(local_dir=new_repo_dir, clone_from=new_repo_id) # Copy files copy_tree(template_repo_dir, new_repo_dir) # Push to Hub repo.push_to_hub() ``` 现在, 当你调用 `copy_repository_template()` 时, 它将在你的帐户下创建模板存储库的副本。 ## 从 🤗 Transformers 调试管道 [[从 🤗 Transformers 调试管道]] 要开始我们调试 Transformer 模型的奇妙世界之旅, 请考虑以下场景: 你正在与一位同事合作进行问答项目, 以帮助电子商务网站的客户找到有关消费品的答案。你的同事给你发了一条消息, 比如: > 嗨! 我刚刚使用了抱抱脸课程的 [第七章](/course/chapter7/7) 中的技术进行了一个实验, 并在 SQuAD 上获得了一些很棒的结果! 我认为我们可以用这个模型作为我们项目的起点。Hub上的模型ID是 "lewtun/distillbert-base-uncased-finetuned-squad-d5716d28"。请随意测试一下 :) 你首先想到的是使用 🤗 Transformers 中的 `管道`: ```python from transformers import pipeline model_checkpoint = get_full_repo_name("distillbert-base-uncased-finetuned-squad-d5716d28") reader = pipeline("question-answering", model=model_checkpoint) ``` ```python out """ OSError: Can't load config for 'lewtun/distillbert-base-uncased-finetuned-squad-d5716d28'. Make sure that: - 'lewtun/distillbert-base-uncased-finetuned-squad-d5716d28' is a correct model identifier listed on 'https://huggingface.co/models' - or 'lewtun/distillbert-base-uncased-finetuned-squad-d5716d28' is the correct path to a directory containing a config.json file """ ``` 哦不对, 好像出了什么问题! 如果你是编程新手, 这些类型的错误一开始看起来有点神秘 (甚至是一个 `OSError`?!)。这里显示的错误只是一个更大的错误报告的最后一部分, 称为 _Python traceback_ (又名堆栈跟踪)。例如, 如果你在 Google Colab 上运行此代码, 你应该会看到类似于以下屏幕截图的内容: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/traceback.png" alt="A Python traceback." width="100%"/> </div> 这些报告中包含很多信息, 所以让我们一起来看看关键部分。首先要注意的是, 应该从 _从底部到顶部_ 读取回溯。如果你习惯于从上到下阅读英文文本, 这可能听起来很奇怪,但它反映了这样一个事实,即回溯显示了在下载模型和标记器时 `管道` 进行的函数调用序列。(查看 [第二章](/course/chapter2) 了解有关 `pipeline` 如何在后台工作的更多详细信息。) <Tip> 🚨 看到Google Colab 回溯中 "6 帧" 周围的蓝色框了吗? 
这是 Colab 的一个特殊功能, 它将回溯压缩为"帧"。如果你似乎无法找到错误的来源, 请确保通过单击这两个小箭头来展开完整的回溯。 </Tip> 这意味着回溯的最后一行指示最后一条错误消息并给出引发的异常的名称。在这种情况下, 异常类型是`OSError`, 表示与系统相关的错误。如果我们阅读随附的错误消息, 我们可以看到模型的 *config.json* 文件似乎有问题, 我们给出了两个修复它的建议: ```python out """ Make sure that: - 'lewtun/distillbert-base-uncased-finetuned-squad-d5716d28' is a correct model identifier listed on 'https://huggingface.co/models' - or 'lewtun/distillbert-base-uncased-finetuned-squad-d5716d28' is the correct path to a directory containing a config.json file """ ``` <Tip> 💡 如果你遇到难以理解的错误消息, 只需将该消息复制并粘贴到 Google 或 [Stack Overflow](https://stackoverflow.com/) 搜索栏中 (是的, 真的!)。你很可能不是第一个遇到错误的人, 这是找到社区中其他人发布的解决方案的好方法。例如, 在 Stack Overflow 上搜索 `OSError: Can't load config for` 给出了几个[hits](https://stackoverflow.com/search?q=OSError%3A+Can%27t+load+config+for+), 可能是用作解决问题的起点。 </Tip> 第一个建议是要求我们检查模型ID是否真的正确, 所以首先要做的就是复制标识符并将其粘贴到Hub的搜索栏中: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/wrong-model-id.png" alt="The wrong model name." width="100%"/> </div> 嗯, 看起来我们同事的模型确实不在 Hub 上... 啊哈, 但是模型名称中有一个错字! DistilBERT 的名称中只有一个 "l", 所以让我们解决这个问题并寻找 "lewtun/distilbert-base-uncased-finetuned-squad-d5716d28": <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/true-model-id.png" alt="The right model name." width="100%"/> </div> 好的, 这很受欢迎。现在让我们尝试使用正确的模型 ID 再次下载模型: ```python model_checkpoint = get_full_repo_name("distilbert-base-uncased-finetuned-squad-d5716d28") reader = pipeline("question-answering", model=model_checkpoint) ``` ```python out """ OSError: Can't load config for 'lewtun/distilbert-base-uncased-finetuned-squad-d5716d28'. Make sure that: - 'lewtun/distilbert-base-uncased-finetuned-squad-d5716d28' is a correct model identifier listed on 'https://huggingface.co/models' - or 'lewtun/distilbert-base-uncased-finetuned-squad-d5716d28' is the correct path to a directory containing a config.json file """ ``` 啊, 再次挫败 -- 欢迎来到机器学习工程师的日常生活! 因为我们已经修复了模型 ID, 所以问题一定出在存储库本身。访问 🤗 Hub 上存储库内容的一种快速方法是通过 `huggingface_hub` 库的 `list_repo_files()` 方法: ```python from huggingface_hub import list_repo_files list_repo_files(repo_id=model_checkpoint) ``` ```python out ['.gitattributes', 'README.md', 'pytorch_model.bin', 'special_tokens_map.json', 'tokenizer_config.json', 'training_args.bin', 'vocab.txt'] ``` 有趣 -- 似乎没有配置文件存储库中的 *config.json* 文件! 难怪我们的 `pipeline` 无法加载模型; 我们的同事一定是在微调后忘记将这个文件推送到 Hub。在这种情况下, 问题似乎很容易解决: 我们可以要求他们添加文件, 或者, 因为我们可以从模型 ID 中看到使用的预训练模型是 [`distilbert-base-uncased`](https://huggingface.co/distilbert-base-uncased), 我们可以下载此模型的配置并将其推送到我们的存储库以查看是否可以解决问题。让我们试试看。使用我们在 [第二章](/course/chapter2) 中学习的技术, 我们使用 `AutoConfig` 类下载模型的配置: ```python from transformers import AutoConfig pretrained_checkpoint = "distilbert-base-uncased" config = AutoConfig.from_pretrained(pretrained_checkpoint) ``` <Tip warning={true}> 🚨 我们在这里采用的方法并不是万无一失的, 因为我们的同事可能在微调模型之前已经调整了 `distilbert-base-uncased` 配置。在现实生活中, 我们想首先检查它们, 但出于本节的目的, 我们假设它们使用默认配置。 </Tip> 然后我们可以使用配置的 `push_to_hub()` 方法将其推送到我们的模型存储库: ```python config.push_to_hub(model_checkpoint, commit_message="Add config.json") ``` 现在我们可以通过从最新提交的 `main` 分支中加载模型来测试这是否有效: ```python reader = pipeline("question-answering", model=model_checkpoint, revision="main") context = r""" Extractive Question Answering is the task of extracting an answer from a text given a question. An example of a question answering dataset is the SQuAD dataset, which is entirely based on that task. 
If you would like to fine-tune a model on a SQuAD task, you may leverage the examples/pytorch/question-answering/run_squad.py script. 🤗 Transformers is interoperable with the PyTorch, TensorFlow, and JAX frameworks, so you can use your favourite tools for a wide variety of tasks! """ question = "What is extractive question answering?" reader(question=question, context=context) ``` ```python out {'score': 0.38669535517692566, 'start': 34, 'end': 95, 'answer': 'the task of extracting an answer from a text given a question'} ``` 哇哦, 成功了!让我们回顾一下你刚刚学到的东西: - Python 中的错误消息称为 _tracebacks_ , 并从下到上阅读。错误消息的最后一行通常包含定位问题根源所需的信息。 - 如果最后一行没有包含足够的信息, 请按照您的方式进行回溯, 看看您是否可以确定源代码中发生错误的位置。 - 如果没有任何错误消息可以帮助您调试问题, 请尝试在线搜索类似问题的解决方案。 - `huggingface_hub` // 🤗 Hub? 库提供了一套工具, 你可以使用这些工具与 Hub 上的存储库进行交互和调试。 现在你知道如何调试管道, 让我们看一下模型本身前向传递中的一个更棘手的示例。 ## 调试模型的前向传递 [[调试模型的前向传递]] 尽管 `pipeline` 对于大多数需要快速生成预测的应用程序来说非常有用, 有时您需要访问模型的 logits (例如, 如果您有一些想要应用的自定义后处理)。为了看看在这种情况下会出现什么问题, 让我们首先从 `pipeline` 中获取模型和标记器: ```python tokenizer = reader.tokenizer model = reader.model ``` 接下来我们需要一个问题, 那么让我们看看是否支持我们最喜欢的框架: ```python question = "Which frameworks can I use?" ``` 正如我们在 [第七章](/course/chapter7) 中学习的, 我们需要采取的通常步骤是对输入进行标记化, 提取开始和结束标记的对数, 然后解码答案范围: ```python import torch inputs = tokenizer(question, context, add_special_tokens=True) input_ids = inputs["input_ids"][0] outputs = model(**inputs) answer_start_scores = outputs.start_logits answer_end_scores = outputs.end_logits # Get the most likely beginning of answer with the argmax of the score answer_start = torch.argmax(answer_start_scores) # Get the most likely end of answer with the argmax of the score answer_end = torch.argmax(answer_end_scores) + 1 answer = tokenizer.convert_tokens_to_string( tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) ) print(f"Question: {question}") print(f"Answer: {answer}") ``` ```python out """ --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) /var/folders/28/k4cy5q7s2hs92xq7_h89_vgm0000gn/T/ipykernel_75743/2725838073.py in <module> 1 inputs = tokenizer(question, text, add_special_tokens=True) 2 input_ids = inputs["input_ids"] ----> 3 outputs = model(**inputs) 4 answer_start_scores = outputs.start_logits 5 answer_end_scores = outputs.end_logits ~/miniconda3/envs/huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1050 or _global_forward_hooks or _global_forward_pre_hooks): -> 1051 return forward_call(*input, **kwargs) 1052 # Do not call functions when jit is used 1053 full_backward_hooks, non_full_backward_hooks = [], [] ~/miniconda3/envs/huggingface/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids, attention_mask, head_mask, inputs_embeds, start_positions, end_positions, output_attentions, output_hidden_states, return_dict) 723 return_dict = return_dict if return_dict is not None else self.config.use_return_dict 724 --> 725 distilbert_output = self.distilbert( 726 input_ids=input_ids, 727 attention_mask=attention_mask, ~/miniconda3/envs/huggingface/lib/python3.8/site-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 1049 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks 1050 or _global_forward_hooks or _global_forward_pre_hooks): -> 1051 return forward_call(*input, 
**kwargs) 1052 # Do not call functions when jit is used 1053 full_backward_hooks, non_full_backward_hooks = [], [] ~/miniconda3/envs/huggingface/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids, attention_mask, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict) 471 raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") 472 elif input_ids is not None: --> 473 input_shape = input_ids.size() 474 elif inputs_embeds is not None: 475 input_shape = inputs_embeds.size()[:-1] AttributeError: 'list' object has no attribute 'size' """ ``` 噢, 看起来我们的代码中有一个错误!但我们不怕一点调试。您可以在笔记本中使用 Python 调试器: <Youtube id="rSPyvPw0p9k"/> 或在终端中: <Youtube id="5PkZ4rbHL6c"/> 在这里, 阅读错误消息告诉我们 `'list' object has no attribute 'size'`, 我们可以看到一个 `-->` 箭头指向 `model(**inputs)` 中出现问题的行。你可以使用 Python 调试器以交互方式调试它, 但现在我们只需打印出一部分 `inputs`, 看看我们有什么: ```python inputs["input_ids"][:5] ``` ```python out [101, 2029, 7705, 2015, 2064] ``` 这当然看起来像一个普通的 Python `list`, 但让我们仔细检查一下类型: ```python type(inputs["input_ids"]) ``` ```python out list ``` 是的, 这肯定是一个 Python `list`。那么出了什么问题呢? 回忆 [第二章](/course/chapter2) 🤗 Transformers 中的 `AutoModelForXxx` 类在 _tensors_ 上运行(PyTorch或者or TensorFlow), 一个常见的操作是使用 `Tensor.size()` 方法提取张量的维度, 例如, 在 PyTorch 中。让我们再看看回溯, 看看哪一行触发了异常: ``` ~/miniconda3/envs/huggingface/lib/python3.8/site-packages/transformers/models/distilbert/modeling_distilbert.py in forward(self, input_ids, attention_mask, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict) 471 raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") 472 elif input_ids is not None: --> 473 input_shape = input_ids.size() 474 elif inputs_embeds is not None: 475 input_shape = inputs_embeds.size()[:-1] AttributeError: 'list' object has no attribute 'size' ``` 看起来我们的代码试图调用 `input_ids.size()`, 但这显然不适用于 Python `list`, 这只是一个容器。我们如何解决这个问题? 在 Stack Overflow 上搜索错误消息给出了很多相关的 [hits](https://stackoverflow.com/search?q=AttributeError%3A+%27list%27+object+has+no+attribute+%27size%27&s=c15ec54c-63cb-481d-a749-408920073e8f)。单击第一个会显示与我们类似的问题, 答案如下面的屏幕截图所示: <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter8/stack-overflow.png" alt="An answer from Stack Overflow." width="100%"/> </div> 答案建议我们添加 `return_tensors='pt'` 到标记器, 所以让我们看看这是否适合我们: ```python out inputs = tokenizer(question, context, add_special_tokens=True, return_tensors="pt") input_ids = inputs["input_ids"][0] outputs = model(**inputs) answer_start_scores = outputs.start_logits answer_end_scores = outputs.end_logits # Get the most likely beginning of answer with the argmax of the score answer_start = torch.argmax(answer_start_scores) # Get the most likely end of answer with the argmax of the score answer_end = torch.argmax(answer_end_scores) + 1 answer = tokenizer.convert_tokens_to_string( tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end]) ) print(f"Question: {question}") print(f"Answer: {answer}") ``` ```python out """ Question: Which frameworks can I use? Answer: pytorch, tensorflow, and jax """ ``` 不错, 成功了! 这是 Stack Overflow 非常有用的一个很好的例子: 通过识别类似的问题, 我们能够从社区中其他人的经验中受益。然而, 像这样的搜索并不总是会产生相关的答案, 那么在这种情况下你能做什么呢? 幸运的是, 有一个受欢迎的开发者社区 [Hugging Face forums](https://discuss.huggingface.co/) 可以帮助你! 在下一节中, 我们将看看如何设计可能得到回答的优秀论坛问题。
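作为本节排错思路的一个小结,下面是一个示意性的检查片段(假设你已经安装了 `huggingface_hub`;仓库名沿用上文的例子),在加载管道之前先确认仓库里是否存在 *config.json*,避免直接撞上同样的 `OSError`:

```py
from huggingface_hub import list_repo_files
from transformers import pipeline

repo_id = "lewtun/distilbert-base-uncased-finetuned-squad-d5716d28"

# 先列出仓库中的文件,检查 config.json 是否存在
files = list_repo_files(repo_id=repo_id)
if "config.json" in files:
    reader = pipeline("question-answering", model=repo_id)
else:
    print(f"{repo_id} 缺少 config.json,请先按照上文的方法补齐配置文件")
```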
course/chapters/zh-CN/chapter8/2.mdx/0
{ "file_path": "course/chapters/zh-CN/chapter8/2.mdx", "repo_id": "course", "token_count": 9532 }
147
# Part 2 发布活动 [[Part 2 发布活动]] 对于课程第 2 部分的发布,我们在微调 sprint 之前组织了一场现场活动,为期两天的会谈。 如果你错过了,你可以赶上下面列出的讲座! ## Day 1: Transformer 的高级API以及如何训练它们 [[Day 1: Transformer 的高级API以及如何训练它们]] **Thomas Wolf:** *迁移学习和Transformers库的诞生* <div class="flex justify-center"> <Youtube id="wCYVeahJES0"/> </div> <p align="center"> <img src="https://i.imgur.com/9eq8oUi.png" alt="一张图总结 Thom 的演讲" width="80%"/> </p> Thomas Wolf 是 Hugging Face 的联合创始人兼首席科学官。 Thomas Wolf 和 Hugging Face 团队创建的工具被 5,000 多个研究机构使用,包括 Facebook 人工智能研究、谷歌研究、DeepMind、亚马逊研究、苹果、艾伦人工智能研究所以及大多数大学系。 Thomas Wolf 是人工智能领域有史以来最大的研究合作的发起人和高级主席:[“BigScience”](https://bigscience.huggingface.co),以及一组广泛使用的 [库和工具](https://github.com/huggingface/)。 Thomas Wolf 还是一位多产的教育家、人工智能和自然语言处理领域的思想领袖,并且经常受邀在世界各地的会议上发表演讲 [https://thomwolf.io](https://thomwolf.io )。 **Jay Alammar:** *Transformers模型的图解* <div class="flex justify-center"> <Youtube id="VzvG23gmcYU"/> </div> <p align="center"> <img src="https://i.imgur.com/rOZAuE9.png" alt="一张图总结 Jay 的演讲" width="80%"/> </p> 通过他广受欢迎的 ML 博客,Jay 帮助数百万研究人员和工程师直观地理解了机器学习工具和概念,从基础(最终出现在 NumPy、Pandas 文档)到前沿(Transformers、BERT、GPT-3)。 **Margaret Mitchell:** *关于机器学习开发中的价值观* <div class="flex justify-center"> <Youtube id="8j9HRMjh_s8"/> </div> <p align="center"> <img src="https://i.imgur.com/NuIsnY3.png" alt="一张图总结 Margaret 的演讲" width="80%"/> </p> Margaret Mitchell 是一名从事人工智能伦理研究的研究员,目前专注于以伦理为依据的人工智能开发。她在自然语言生成、辅助技术、计算机视觉和人工智能伦理方面发表了 50 多篇论文,并在会话生成和情感分类领域拥有多项专利。她之前曾在 Google AI 担任员工研究科学家,在那里她创立并共同领导了 Google 的伦理 AI 小组,专注于基础 AI 伦理研究和在 Google 内部实施 AI 伦理。在加入谷歌之前,她是微软研究院的一名研究员,专注于计算机视觉到语言的生成;并且是约翰霍普金斯大学的博士后,专注于贝叶斯建模和信息提取。她拥有阿伯丁大学计算机科学博士学位和华盛顿大学计算语言学硕士学位。在获得学位的同时,她还于 2005 年至 2012 年在俄勒冈健康与科学大学从事机器学习、神经系统疾病和辅助技术方面的工作。她在多样性、包容性、计算机科学和伦理学的交叉领域领导了许多研讨会和倡议。她的工作获得了国防部长阿什卡特和美国盲人基金会的奖励,并被多家科技公司实施。她喜欢园艺、狗和猫。 **Matthew Watson 和 Chen Qian:** *使用 Keras 的 NLP 工作流程* <div class="flex justify-center"> <Youtube id="gZIP-_2XYMM"/> </div> <p align="center"> <img src="https://i.imgur.com/1vD2az8.png" alt="一张图总结 Matt 和 Chen 的演讲" width="80%"/> </p> Matthew Watson 是 Keras 团队的机器学习工程师,专注于高级建模 API。 他在本科期间学习计算机图形学,并在斯坦福大学获得硕士学位。 作为一名几乎是英语专业的学生,他转向计算机科学,热衷于跨学科工作并使 NLP 为更广泛的受众所接受。 Chen Qian 是 Keras 团队的一名软件工程师,专注于高级建模 API。 Chen 在斯坦福大学获得电气工程硕士学位,他对简化 ML 任务和大规模 ML 的代码实现特别感兴趣。 **Mark Saroufim:** *如何使用 Pytorch 训练模型* <div class="flex justify-center"> <Youtube id="KmvPlW2cbIo"/> </div> <p align="center"> <img src="https://i.imgur.com/TPmlkm8.png" alt="一张图总结 Mark 的演讲" width="80%"/> </p> Mark Saroufim 是 Pytorch 的合作伙伴工程师,致力于开发 OSS 生产工具,包括 TorchServe 和 Pytorch Enterprise。 Mark 是 Graphcore、[yuri.ai](http://yuri.ai/)、Microsoft 和 NASA 的 JPL 的应用科学家和产品经理。 他热衷于让编程更有趣。 **Jakob Uszkoreit:** *它没有坏所以<del>不要修复</del>让我们打破它* <div class="flex justify-center"> <Youtube id="C6jweXYFHSA"/> </div> <p align="center"> <img src="https://i.imgur.com/5dWQeNB.png" alt="一张图总结 Jakob 的演讲" width="80%"/> </p> Jakob Uszkoreit 是 Inceptive 的联合创始人。 Inceptive 在紧密循环中使用大规模深度学习和高通量实验设计用于疫苗和治疗的 RNA 分子,目标是使基于 RNA 的药物更容易获得、更有效和更广泛适用。 此前,Jakob 在谷歌工作了十多年,领导谷歌大脑、研究和搜索领域的研发团队,致力于深度学习基础、计算机视觉、语言理解和机器翻译。 ## Day 2: 可以使用的工具 [[Day 2: 可以使用的工具]] **Lewis Tunstall:** *使用 🤗 Transformers Trainer 让训练更加简单* <div class="flex justify-center"> <Youtube id="u--UVvH-LIQ"/> </div> Lewis 是 Hugging Face 的机器学习工程师,专注于开发开源工具并让更广泛的社区可以访问它们。 他还是 O'Reilly 即将出版的有关于Transform的合著者,您可以在 Twitter (@_lewtun) 上关注他,了解 NLP 提示和技巧! 
**Matthew Carrigan:** *用于 🤗 Transformers 和 🤗 Datasets的新 TensorFlow 特性* <div class="flex justify-center"> <Youtube id="gQUlXp1691w"/> </div> Matt 负责Transformers的TensorFlow维护,并将最终领导一场针对现任PyTorch派系的政变,可能会通过他的推特账户@carrigmat进行协调。 **Lysandre Debut:** *使用Hugging Face Hub 作为协作和共享机器学习项目* <div class="flex justify-center"> <Youtube id="RBw1TmdEZp0"/> </div> <p align="center"> <img src="https://i.imgur.com/TarIPCz.png" alt="一张图总结 Lysandre 的演讲" width="80%"/> </p> Lysandre 是 Hugging Face 的机器学习工程师,他参与了许多开源项目。 他的目标是通过使用非常简单的 API 开发强大的工具,让每个人都可以使用机器学习。 **Lucile Saulnier:** *使用 🤗 Transformers 和 🤗 Tokenizers 获取您自己的tokenizer* <div class="flex justify-center"> <Youtube id="UkNmyTFKriI"/> </div> Lucile 是 Hugging Face 的机器学习工程师,负责开发和支持开源工具的使用。 她还积极参与了自然语言处理领域的许多研究项目,例如协作训练模型和 BigScience。 **Sylvain Gugger:** *使用 🤗 Accelerate* 增强您的 PyTorch 训练循环* <div class="flex justify-center"> <Youtube id="t8Krzu-nSeY"/> </div> Sylvain 是 Hugging Face 的研究工程师,也是🤗 Transformers 的核心维护者之一,也是🤗 Accelerate 的开发者。 他喜欢让模型训练变得更容易。 **Merve Noyan:** *使用 🤗 Spaces 展示您的模型演示* <div class="flex justify-center"> <Youtube id="vbaKOa4UXoM"/> </div> Merve 是 Hugging Face 的开发者倡导者,致力于开发工具并围绕它们构建内容,以使每个人的机器学习民主化。 **Abubakar Abid:** *快速构建机器学习应用程序* <div class="flex justify-center"> <Youtube id="c7mle2yYpwQ"/> </div> <p align="center"> <img src="https://i.imgur.com/qWIFeiF.png" alt="一张图总结 Abubakar 的演讲" width="80%"/> </p> Abubakar Abid 是 [Gradio](www.gradio.app) 的首席执行官。 他于 2015 年获得麻省理工学院电气工程和计算机科学学士学位,并于 2021 年获得斯坦福大学应用机器学习博士学位。作为 Gradio 的首席执行官,Abubakar 致力于使机器学习模型更易于演示、调试和部署。 **Mathieu Desvé:** *AWS ML Vision:让所有客户都可以使用机器学习* <div class="flex justify-center"> <Youtube id="O2e3pXO4aRE"/> </div> <p align="center"> <img src="https://i.imgur.com/oLdZTKy.png" alt="一张图总结 Mathieu 的演讲" width="80%"/> </p> 技术爱好者,有空闲时间的创客。 我喜欢挑战和解决客户和用户的问题,每天和有才华的人一起学习。 自 2004 年以来,我在前端、后端、基础设施、运营和管理等多个职位上工作。 尝试以敏捷的方式解决公共技术和管理问题。 **Philipp Schmid:** *使用 Amazon SageMaker 和🤗 Transformers 进行托管训练* <div class="flex justify-center"> <Youtube id="yG6J2Zfo8iw"/> </div> Philipp Schmid 是 Hugging Face 的机器学习工程师和技术主管,负责领导与 Amazon SageMaker 团队的合作。 他热衷于使尖端 NLP 模型民主化和生产化,并提高深度学习的易用性。
course/chapters/zh-CN/events/2.mdx/0
{ "file_path": "course/chapters/zh-CN/events/2.mdx", "repo_id": "course", "token_count": 5495 }
148
<FrameworkSwitchCourse {fw} /> # 標記器(Tokenizer) {#if fw === 'pt'} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section4_pt.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section4_pt.ipynb"}, ]} /> {:else} <CourseFloatingBanner chapter={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section4_tf.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter2/section4_tf.ipynb"}, ]} /> {/if} <Youtube id="VFp38yj8h3A"/> 標記器(Tokenizer)是 NLP 管道的核心組件之一。它們有一個目的:將文本轉換為模型可以處理的數據。模型只能處理數字,因此標記器(Tokenizer)需要將我們的文本輸入轉換為數字數據。在本節中,我們將確切地探討標記化管道中發生的事情。 在 NLP 任務中,通常處理的數據是原始文本。這是此類文本的示例 ``` Jim Henson was a puppeteer ``` 但是,模型只能處理數字,因此我們需要找到一種將原始文本轉換為數字的方法。這就是標記器(tokenizer)所做的,並且有很多方法可以解決這個問題。目標是找到最有意義的表示——即對模型最有意義的表示——並且如果可能的話,找到最小的表示。 讓我們看一下標記化算法的一些示例,並嘗試回答您可能對標記化提出的一些問題。 ## 基於詞的(Word-based) <Youtube id="nhJxYji1aho"/> 想到的第一種標記器是基於詞的(_word-based_).它通常很容易設置和使用,只需幾條規則,並且通常會產生不錯的結果。例如,在下圖中,目標是將原始文本拆分為單詞併為每個單詞找到一個數字表示: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/word_based_tokenization.svg" alt="An example of word-based tokenization."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/word_based_tokenization-dark.svg" alt="An example of word-based tokenization."/> </div> 有多種方法可以拆分文本。例如,我們可以通過應用Python的`split()`函數,使用空格將文本標記為單詞: ```py tokenized_text = "Jim Henson was a puppeteer".split() print(tokenized_text) ``` ```python out ['Jim', 'Henson', 'was', 'a', 'puppeteer'] ``` 還有一些單詞標記器的變體,它們具有額外的標點符號規則。使用這種標記器,我們最終可以得到一些非常大的“詞彙表”,其中詞彙表由我們在語料庫中擁有的獨立標記的總數定義。 每個單詞都分配了一個 ID,從 0 開始一直到詞彙表的大小。該模型使用這些 ID 來識別每個單詞。 如果我們想用基於單詞的標記器(tokenizer)完全覆蓋一種語言,我們需要為語言中的每個單詞都有一個標識符,這將生成大量的標記。例如,英語中有超過 500,000 個單詞,因此要構建從每個單詞到輸入 ID 的映射,我們需要跟蹤這麼多 ID。此外,像“dog”這樣的詞與“dogs”這樣的詞的表示方式不同,模型最初無法知道“dog”和“dogs”是相似的:它會將這兩個詞識別為不相關。這同樣適用於其他相似的詞,例如“run”和“running”,模型最初不會認為它們是相似的。 最後,我們需要一個自定義標記(token)來表示不在我們詞彙表中的單詞。這被稱為“未知”標記(token),通常表示為“[UNK]”或"&lt;unk&gt;"。如果你看到標記器產生了很多這樣的標記,這通常是一個不好的跡象,因為它無法檢索到一個詞的合理表示,並且你會在這個過程中丟失信息。製作詞彙表時的目標是以這樣一種方式進行,即標記器將盡可能少的單詞標記為未知標記。 減少未知標記數量的一種方法是使用更深一層的標記器(tokenizer),即基於字符的(_character-based_)標記器(tokenizer)。 ## 基於字符(Character-based) <Youtube id="ssLq_EK2jLE"/> 基於字符的標記器(tokenizer)將文本拆分為字符,而不是單詞。這有兩個主要好處: - 詞彙量要小得多。 - 詞彙外(未知)標記(token)要少得多,因為每個單詞都可以從字符構建。 但是這裡也出現了一些關於空格和標點符號的問題: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/character_based_tokenization.svg" alt="An example of character-based tokenization."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/character_based_tokenization-dark.svg" alt="An example of character-based tokenization."/> </div> 這種方法也不是完美的。由於現在表示是基於字符而不是單詞,因此人們可能會爭辯說,從直覺上講,它的意義不大:每個字符本身並沒有多大意義,而單詞就是這種情況。然而,這又因語言而異;例如,在中文中,每個字符比拉丁語言中的字符包含更多的信息。 另一件要考慮的事情是,我們的模型最終會處理大量的詞符(token):雖然使用基於單詞的標記器(tokenizer),單詞只會是單個標記,但當轉換為字符時,它很容易變成 10 
個或更多的詞符(token)。 為了兩全其美,我們可以使用結合這兩種方法的第三種技術:*子詞標記化(subword tokenization)*。 ## 子詞標記化 <Youtube id="zHvTiHr506c"/> 子詞分詞算法依賴於這樣一個原則,即不應將常用詞拆分為更小的子詞,而應將稀有詞分解為有意義的子詞。 例如,“annoyingly”可能被認為是一個罕見的詞,可以分解為“annoying”和“ly”。這兩者都可能作為獨立的子詞出現得更頻繁,同時“annoyingly”的含義由“annoying”和“ly”的複合含義保持。 這是一個示例,展示了子詞標記化算法如何標記序列“Let's do tokenization!”: <div class="flex justify-center"> <img class="block dark:hidden" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/bpe_subword.svg" alt="A subword tokenization algorithm."/> <img class="hidden dark:block" src="https://huggingface.co/datasets/huggingface-course/documentation-images/resolve/main/en/chapter2/bpe_subword-dark.svg" alt="A subword tokenization algorithm."/> </div> 這些子詞最終提供了很多語義含義:例如,在上面的示例中,“tokenization”被拆分為“token”和“ization”,這兩個具有語義意義同時節省空間的詞符(token)(只需要兩個標記(token)代表一個長詞)。這使我們能夠對較小的詞彙表進行相對較好的覆蓋,並且幾乎沒有未知的標記 這種方法在土耳其語等粘著型語言(agglutinative languages)中特別有用,您可以通過將子詞串在一起來形成(幾乎)任意長的複雜詞。 ### 還有更多! 不出所料,還有更多的技術。僅舉幾例: - Byte-level BPE, 用於 GPT-2 - WordPiece, 用於 BERT - SentencePiece or Unigram, 用於多個多語言模型 您現在應該對標記器(tokenizers)的工作原理有足夠的瞭解,以便開始使用 API。 ## 加載和保存 加載和保存標記器(tokenizer)就像使用模型一樣簡單。實際上,它基於相同的兩種方法: `from_pretrained()` 和 `save_pretrained()` 。這些方法將加載或保存標記器(tokenizer)使用的算法(有點像*建築學(architecture)*的模型)以及它的詞彙(有點像*權重(weights)*模型)。 加載使用與 BERT 相同的檢查點訓練的 BERT 標記器(tokenizer)與加載模型的方式相同,除了我們使用 `BertTokenizer` 類: ```py from transformers import BertTokenizer tokenizer = BertTokenizer.from_pretrained("bert-base-cased") ``` {#if fw === 'pt'} 如同 `AutoModel`,`AutoTokenizer` 類將根據檢查點名稱在庫中獲取正確的標記器(tokenizer)類,並且可以直接與任何檢查點一起使用: {:else} 如同 `TFAutoModel`, `AutoTokenizer` 類將根據檢查點名稱在庫中獲取正確的標記器(tokenizer)類,並且可以直接與任何檢查點一起使用: {/if} ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") ``` 我們現在可以使用標記器(tokenizer),如上一節所示: ```python tokenizer("Using a Transformer network is simple") ``` ```python out {'input_ids': [101, 7993, 170, 11303, 1200, 2443, 1110, 3014, 102], 'token_type_ids': [0, 0, 0, 0, 0, 0, 0, 0, 0], 'attention_mask': [1, 1, 1, 1, 1, 1, 1, 1, 1]} ``` 保存標記器(tokenizer)與保存模型相同: ```py tokenizer.save_pretrained("directory_on_my_computer") ``` 我們在[Chapter 3](/Couse/chapter3)中將更多地談論`token_type_ids`,稍後我們將解釋 `attention_mask` 鍵。首先,讓我們看看 `input_ids` 如何生成。為此,我們需要查看標記器(tokenizer)的中間方法。 ## 編碼 <Youtube id="Yffk5aydLzg"/> 將文本翻譯成數字被稱為編碼(_encoding_).編碼分兩步完成:標記化,然後轉換為輸入 ID。 正如我們所見,第一步是將文本拆分為單詞(或單詞的一部分、標點符號等),通常稱為*標記(token)*。有多個規則可以管理該過程,這就是為什麼我們需要使用模型名稱來實例化標記器(tokenizer),以確保我們使用模型預訓練時使用的相同規則。 第二步是將這些標記轉換為數字,這樣我們就可以用它們構建一個張量並將它們提供給模型。為此,標記器(tokenizer)有一個*詞彙(vocabulary)*,這是我們在實例化它時下載的部分 `from_pretrained()` 方法。同樣,我們需要使用模型預訓練時使用的相同詞彙。 為了更好地理解這兩個步驟,我們將分別探討它們。請注意,我們將使用一些單獨執行部分標記化管道的方法來向您展示這些步驟的中間結果,但實際上,您應該直接在您的輸入上調用標記器(tokenizer)(如第 2 部分所示)。 ### 標記化 標記化過程由標記器(tokenizer)的`tokenize()` 方法實現: ```py from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") sequence = "Using a Transformer network is simple" tokens = tokenizer.tokenize(sequence) print(tokens) ``` 此方法的輸出是一個字符串列表或標記(token): ```python out ['Using', 'a', 'transform', '##er', 'network', 'is', 'simple'] ``` 這個標記器(tokenizer)是一個子詞標記器(tokenizer):它對詞進行拆分,直到獲得可以用其詞彙表表示的標記(token)。`transformer` 就是這種情況,它分為兩個標記:`transform` 和 `##er`。 ### 從詞符(token)到輸入 ID 輸入 ID 的轉換由標記器(tokenizer)的`convert_tokens_to_ids()`方法實現: ```py ids = tokenizer.convert_tokens_to_ids(tokens) print(ids) ``` ```python out [7993, 170, 11303, 1200, 2443, 1110, 3014] ``` 這些輸出一旦轉換為適當的框架張量,就可以用作模型的輸入,如本章前面所見。 <Tip> ✏️ **試試看!** 在我們在第 2 節中使用的輸入句子(“I've been waiting for a 
HuggingFace course my whole life.”和“I hate this so much!”)複製最後兩個步驟(標記化和轉換為輸入 ID)。檢查您獲得的輸入 ID 是否與我們之前獲得的相同! </Tip> ## 解碼 *解碼(Decoding)* 正好相反:從詞彙索引中,我們想要得到一個字符串。這可以通過 `decode()` 方法實現,如下: ```py decoded_string = tokenizer.decode([7993, 170, 11303, 1200, 2443, 1110, 3014]) print(decoded_string) ``` ```python out 'Using a Transformer network is simple' ``` 請注意, `decode` 方法不僅將索引轉換回標記(token),還將屬於相同單詞的標記(token)組合在一起以生成可讀的句子。當我們使用預測新文本的模型(根據提示生成的文本,或序列到序列問題(如翻譯或摘要))時,這種行為將非常有用。 到現在為止,您應該瞭解標記器(tokenizer)可以處理的原子操作:標記化、轉換為 ID 以及將 ID 轉換回字符串。然而,我們只是刮到了冰山一角。在下一節中,我們將採用我們的方法來克服它的限制,並看看如何克服它們。
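作為一個小小的示意範例(假設沿用上文的 `bert-base-cased` 標記器),下面把標記化、轉換為輸入 ID 與解碼串在一起,並與直接呼叫標記器(tokenizer)的結果對照:

```py
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

sequence = "Using a Transformer network is simple"

# 逐步執行:標記化 -> 轉換為輸入 ID -> 解碼
tokens = tokenizer.tokenize(sequence)
ids = tokenizer.convert_tokens_to_ids(tokens)
print(tokenizer.decode(ids))
# 'Using a Transformer network is simple'

# 直接呼叫標記器還會額外加入特殊標記 [CLS] 與 [SEP]
encoded = tokenizer(sequence)
print(tokenizer.decode(encoded["input_ids"]))
# '[CLS] Using a Transformer network is simple [SEP]'
```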
course/chapters/zh-TW/chapter2/4.mdx/0
{ "file_path": "course/chapters/zh-TW/chapter2/4.mdx", "repo_id": "course", "token_count": 8024 }
149
# Part 1 完結! <CourseFloatingBanner chapter={4} classNames="absolute z-10 right-0 top-0" /> 這是課程第一部分的結尾!第 2 部分將在 11 月 15 日與大型社區活動一起發佈,[點擊這裡](https://huggingface.co/blog/course-launch-event)查看更多信息. 您現在應該能夠針對文本分類問題(單個或成對句子)對預訓練模型進行微調,並將結果上傳到模型中心。為確保您掌握了第一部分的內容,您應該針對您感興趣的想法進行嘗試(不一定是英語)!一旦你完成,您可以在[Hugging Face 社區](https://discuss.huggingface.co/)的[這個話題](https://discuss.huggingface.co/t/share-your-projects/6803)分享您的項目。 我們迫不及待地想看看您將用它構建什麼!
course/chapters/zh-TW/chapter4/5.mdx/0
{ "file_path": "course/chapters/zh-TW/chapter4/5.mdx", "repo_id": "course", "token_count": 508 }
150
# 字節對編碼標記化 <CourseFloatingBanner chapter={6} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Google Colab", value: "https://colab.research.google.com/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section5.ipynb"}, {label: "Aws Studio", value: "https://studiolab.sagemaker.aws/import/github/huggingface/notebooks/blob/master/course/zh-CN/chapter6/section5.ipynb"}, ]} /> 字節對編碼(BPE)最初被開發為一種壓縮文本的算法,然後在預訓練 GPT 模型時被 OpenAI 用於標記化。許多 Transformer 模型都使用它,包括 GPT、GPT-2、RoBERTa、BART 和 DeBERTa。 <Youtube id="HEikzVL-lZU"/> <Tip> 💡 本節深入介紹了BPE,甚至展示了一個完整的實現。如果你只想大致瞭解標記化算法,可以跳到最後。 </Tip> ## 訓練算法 BPE 訓練首先計算語料庫中使用的唯一單詞集(在完成標準化和預標記化步驟之後),然後通過獲取用於編寫這些單詞的所有符號來構建詞彙表。舉一個簡單的例子,假設我們的語料庫使用了這五個詞: ``` "hug", "pug", "pun", "bun", "hugs" ``` 基礎詞彙將是 `["b", "g", "h", "n", "p", "s", "u"]`。對於實際情況,基本詞彙表將包含所有 ASCII 字符,至少,可能還包含一些 Unicode 字符。如果您正在標記的示例使用不在訓練語料庫中的字符,則該字符將轉換為未知標記。這就是為什麼許多 NLP 模型在分析帶有表情符號的內容方面非常糟糕的原因之一。 <Tip> TGPT-2 和 RoBERTa 標記器(非常相似)有一個聰明的方法來處理這個問題: 他們不把單詞看成是用 Unicode 字符寫的,而是用字節寫的。這樣,基本詞彙表的大小很小(256),但你能想到的每個字符仍將被包含在內,而不會最終轉換為未知標記。這個技巧被稱為 *字節級 BPE*。 </Tip> 獲得這個基本詞彙後,我們添加新的標記,直到通過學習*合併*達到所需的詞彙量,這是將現有詞彙表的兩個元素合併為一個新元素的規則。因此在開始時,這些合併將創建具有兩個字符的標記,然後隨著訓練的進行,會創建更長的子詞。 在分詞器訓練期間的任何一步,BPE 算法都會搜索最常見的現有標記對 ("對",這裡我們指的是單詞中的兩個連續標記)。最頻繁的一對將被合併,我們沖洗並重複下一步。 回到我們之前的例子,讓我們假設單詞具有以下頻率: ``` ("hug", 10), ("pug", 5), ("pun", 12), ("bun", 4), ("hugs", 5) ``` 意味著 `"hug"` 在語料庫中出現了10次, `"pug"` 5次, `"pun"` 12次, `"bun"` 4次, 以及 `"hugs"` 5次。我們通過將每個單詞拆分為字符(形成我們初始詞彙表的字符)來開始訓練,這樣我們就可以將每個單詞視為一個標記列表: ``` ("h" "u" "g", 10), ("p" "u" "g", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "u" "g" "s", 5) ``` 然後我們看成對。這對 `("h", "u")` 出現在單詞 `"hug"` 和 `"hugs"`中,所以語料庫中總共有15次。不過,這並不是最頻繁的一對:這個榮譽屬於 `("u", "g")`,它出現在 `"hug"`, `"pug"`, 以及 `"hugs"`中,在詞彙表中總共 20 次。 因此,標記器學習的第一個合併規則是 `("u", "g") -> "ug"`,意思就是 `"ug"` 將被添加到詞彙表中,並且這對應該合併到語料庫的所有單詞中。在這個階段結束時,詞彙表和語料庫看起來像這樣: ``` Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug"] Corpus: ("h" "ug", 10), ("p" "ug", 5), ("p" "u" "n", 12), ("b" "u" "n", 4), ("h" "ug" "s", 5) ``` 現在我們有一些導致標記長於兩個字符的對: 例如 `("h", "ug")`, 在語料庫中出現15次。然而,這個階段最頻繁的對是 `("u", "n")`,在語料庫中出現16次,所以學到的第二個合併規則是 `("u", "n") -> "un"`。將其添加到詞彙表併合並所有現有的這個對,將出現: ``` Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug", "un"] Corpus: ("h" "ug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("h" "ug" "s", 5) ``` 現在最頻繁的一對是 `("h", "ug")`,所以我們學習了合併規則 `("h", "ug") -> "hug"`,這給了我們第一個三個字母的標記。合併後,語料庫如下所示: ``` Vocabulary: ["b", "g", "h", "n", "p", "s", "u", "ug", "un", "hug"] Corpus: ("hug", 10), ("p" "ug", 5), ("p" "un", 12), ("b" "un", 4), ("hug" "s", 5) ``` 我們繼續這樣合併,直到達到我們所需的詞彙量。 <Tip> ✏️ **現在輪到你了!**你認為下一個合併規則是什麼? </Tip> ## 標記化算法 標記化緊跟訓練過程,從某種意義上說,通過應用以下步驟對新輸入進行標記: 1. 規範化 2. 預標記化 3. 將單詞拆分為單個字符 4. 將學習的合併規則按順序應用於這些拆分 讓我們以我們在訓練期間使用的示例為例,學習三個合併規則: ``` ("u", "g") -> "ug" ("u", "n") -> "un" ("h", "ug") -> "hug" ``` 這個單詞 `"bug"` 將被標記為 `["b", "ug"]`。然而 `"mug"`,將被標記為 `["[UNK]", "ug"]`,因為字母 `"m"` 不再基本詞彙表中。同樣,單詞`"thug"` 會被標記為 `["[UNK]", "hug"]`: 字母 `"t"` 不在基本詞彙表中,應用合併規則首先導致 `"u"` 和 `"g"` 被合併,然後是 `"hu"` 和 `"g"` 被合併。 <Tip> ✏️ **現在輪到你了!** 你認為這個詞 `"unhug"` 將如何被標記? 
</Tip> ## 實現 BPE 現在讓我們看一下 BPE 算法的實現。這不會是你可以在大型語料庫上實際使用的優化版本;我們只是想向你展示代碼,以便你可以更好地理解算法 首先我們需要一個語料庫,所以讓我們用幾句話創建一個簡單的語料庫: ```python corpus = [ "This is the Hugging Face course.", "This chapter is about tokenization.", "This section shows several tokenizer algorithms.", "Hopefully, you will be able to understand how they are trained and generate tokens.", ] ``` 接下來,我們需要將該語料庫預先標記為單詞。由於我們正在複製 BPE 標記器(如 GPT-2),我們將使用 `gpt2` 標記器作為預標記化的標記器: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("gpt2") ``` 然後我們在進行預標記化時計算語料庫中每個單詞的頻率: ```python from collections import defaultdict word_freqs = defaultdict(int) for text in corpus: words_with_offsets = tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str(text) new_words = [word for word, offset in words_with_offsets] for word in new_words: word_freqs[word] += 1 print(word_freqs) ``` ```python out defaultdict(int, {'This': 3, 'Ġis': 2, 'Ġthe': 1, 'ĠHugging': 1, 'ĠFace': 1, 'ĠCourse': 1, '.': 4, 'Ġchapter': 1, 'Ġabout': 1, 'Ġtokenization': 1, 'Ġsection': 1, 'Ġshows': 1, 'Ġseveral': 1, 'Ġtokenizer': 1, 'Ġalgorithms': 1, 'Hopefully': 1, ',': 1, 'Ġyou': 1, 'Ġwill': 1, 'Ġbe': 1, 'Ġable': 1, 'Ġto': 1, 'Ġunderstand': 1, 'Ġhow': 1, 'Ġthey': 1, 'Ġare': 1, 'Ġtrained': 1, 'Ġand': 1, 'Ġgenerate': 1, 'Ġtokens': 1}) ``` 下一步是計算基本詞彙,由語料庫中使用的所有字符組成: ```python alphabet = [] for word in word_freqs.keys(): for letter in word: if letter not in alphabet: alphabet.append(letter) alphabet.sort() print(alphabet) ``` ```python out [ ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'Ġ'] ``` 我們還在該詞彙表的開頭添加了模型使用的特殊標記。對於GPT-2,唯一的特殊標記是 `"<|endoftext|>"`: ```python vocab = ["<|endoftext|>"] + alphabet.copy() ``` 我們現在需要將每個單詞拆分為單獨的字符,以便能夠開始訓練: ```python splits = {word: [c for c in word] for word in word_freqs.keys()} ``` 現在我們已準備好進行訓練,讓我們編寫一個函數來計算每對的頻率。我們需要在訓練的每個步驟中使用它: ```python def compute_pair_freqs(splits): pair_freqs = defaultdict(int) for word, freq in word_freqs.items(): split = splits[word] if len(split) == 1: continue for i in range(len(split) - 1): pair = (split[i], split[i + 1]) pair_freqs[pair] += freq return pair_freqs ``` 讓我們來看看這個字典在初始拆分後的一部分: ```python pair_freqs = compute_pair_freqs(splits) for i, key in enumerate(pair_freqs.keys()): print(f"{key}: {pair_freqs[key]}") if i >= 5: break ``` ```python out ('T', 'h'): 3 ('h', 'i'): 3 ('i', 's'): 5 ('Ġ', 'i'): 2 ('Ġ', 't'): 7 ('t', 'h'): 3 ``` 現在, 找到最頻繁的對只需要一個快速的循環: ```python best_pair = "" max_freq = None for pair, freq in pair_freqs.items(): if max_freq is None or max_freq < freq: best_pair = pair max_freq = freq print(best_pair, max_freq) ``` ```python out ('Ġ', 't') 7 ``` 所以第一個要學習的合併是 `('Ġ', 't') -> 'Ġt'`, 我們添加 `'Ġt'` 到詞彙表: ```python merges = {("Ġ", "t"): "Ġt"} vocab.append("Ġt") ``` 要繼續接下來的步驟,我們需要在我們的`分詞`字典中應用該合併。讓我們為此編寫另一個函數: ```python def merge_pair(a, b, splits): for word in word_freqs: split = splits[word] if len(split) == 1: continue i = 0 while i < len(split) - 1: if split[i] == a and split[i + 1] == b: split = split[:i] + [a + b] + split[i + 2 :] else: i += 1 splits[word] = split return splits ``` 我們可以看看第一次合併的結果: ```py splits = merge_pair("Ġ", "t", splits) print(splits["Ġtrained"]) ``` ```python out ['Ġt', 'r', 'a', 'i', 'n', 'e', 'd'] ``` 現在我們有了循環所需的一切,直到我們學會了我們想要的所有合併。我們的目標是詞彙量達到50: ```python vocab_size = 50 while len(vocab) < vocab_size: pair_freqs = compute_pair_freqs(splits) best_pair = "" max_freq = None for pair, freq in pair_freqs.items(): if max_freq is None or max_freq < 
freq: best_pair = pair max_freq = freq splits = merge_pair(*best_pair, splits) merges[best_pair] = best_pair[0] + best_pair[1] vocab.append(best_pair[0] + best_pair[1]) ``` 結果,我們學習了 19 條合併規則(初始詞彙表的大小 31 -- 30 字母字符,加上特殊標記): ```py print(merges) ``` ```python out {('Ġ', 't'): 'Ġt', ('i', 's'): 'is', ('e', 'r'): 'er', ('Ġ', 'a'): 'Ġa', ('Ġt', 'o'): 'Ġto', ('e', 'n'): 'en', ('T', 'h'): 'Th', ('Th', 'is'): 'This', ('o', 'u'): 'ou', ('s', 'e'): 'se', ('Ġto', 'k'): 'Ġtok', ('Ġtok', 'en'): 'Ġtoken', ('n', 'd'): 'nd', ('Ġ', 'is'): 'Ġis', ('Ġt', 'h'): 'Ġth', ('Ġth', 'e'): 'Ġthe', ('i', 'n'): 'in', ('Ġa', 'b'): 'Ġab', ('Ġtoken', 'i'): 'Ġtokeni'} ``` 詞彙表由特殊標記、初始字母和所有合併結果組成: ```py print(vocab) ``` ```python out ['<|endoftext|>', ',', '.', 'C', 'F', 'H', 'T', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'k', 'l', 'm', 'n', 'o', 'p', 'r', 's', 't', 'u', 'v', 'w', 'y', 'z', 'Ġ', 'Ġt', 'is', 'er', 'Ġa', 'Ġto', 'en', 'Th', 'This', 'ou', 'se', 'Ġtok', 'Ġtoken', 'nd', 'Ġis', 'Ġth', 'Ġthe', 'in', 'Ġab', 'Ġtokeni'] ``` <Tip> 💡 在同一語料庫上使用 `train_new_from_iterator()` 不會產生完全相同的詞彙表。這是因為當有最頻繁對的選擇時,我們選擇遇到的第一個, 而 🤗 Tokenizers 庫根據內部ID選擇第一個。 </Tip> 為了對新文本進行分詞,我們對其進行預分詞、拆分,然後應用學到的所有合併規則: ```python def tokenize(text): pre_tokenize_result = tokenizer._tokenizer.pre_tokenizer.pre_tokenize_str(text) pre_tokenized_text = [word for word, offset in pre_tokenize_result] splits = [[l for l in word] for word in pre_tokenized_text] for pair, merge in merges.items(): for idx, split in enumerate(splits): i = 0 while i < len(split) - 1: if split[i] == pair[0] and split[i + 1] == pair[1]: split = split[:i] + [merge] + split[i + 2 :] else: i += 1 splits[idx] = split return sum(splits, []) ``` 我們可以在任何由字母表中的字符組成的文本上嘗試這個: ```py tokenize("This is not a token.") ``` ```python out ['This', 'Ġis', 'Ġ', 'n', 'o', 't', 'Ġa', 'Ġtoken', '.'] ``` <Tip warning={true}> ⚠️ 如果存在未知字符,我們的實現將拋出錯誤,因為我們沒有做任何處理它們。GPT-2 實際上沒有未知標記(使用字節級 BPE 時不可能得到未知字符),但這可能發生在這裡,因為我們沒有在初始詞彙表中包含所有可能的字節。 BPE 的這方面超出了本節的範圍,因此我們忽略了細節。 </Tip> 這就是 BPE 算法!接下來,我們將看看 WordPiece。
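作為對照,下面是一個簡短的示意範例(假設已安裝 🤗 Tokenizers 庫),改用庫內建的 BPE 模型在同一個小語料庫上訓練,而不是像上文那樣手動實現;由於合併平手時的處理方式不同,得到的詞彙表不一定與上文完全一致:

```py
from tokenizers import Tokenizer, models, trainers, pre_tokenizers

corpus = [
    "This is the Hugging Face course.",
    "This chapter is about tokenization.",
    "This section shows several tokenizer algorithms.",
    "Hopefully, you will be able to understand how they are trained and generate tokens.",
]

# 建立一個以 BPE 為模型的標記器,並使用位元組級(byte-level)預標記化
tokenizer = Tokenizer(models.BPE())
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)

# 以 50 的目標詞彙量訓練,並加入特殊標記
trainer = trainers.BpeTrainer(vocab_size=50, special_tokens=["<|endoftext|>"])
tokenizer.train_from_iterator(corpus, trainer=trainer)

print(tokenizer.encode("This is not a token.").tokens)
```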
course/chapters/zh-TW/chapter6/5.mdx/0
{ "file_path": "course/chapters/zh-TW/chapter6/5.mdx", "repo_id": "course", "token_count": 8073 }
151
1 00:00:00,234 --> 00:00:02,901 (page whirring) 2 00:00:04,260 --> 00:00:07,200 - Before diving in character-based tokenization, 3 00:00:07,200 --> 00:00:10,350 understanding why this kind of tokenization is interesting 4 00:00:10,350 --> 00:00:13,533 requires understanding the flaws of word-based tokenization. 5 00:00:14,640 --> 00:00:16,320 If you haven't seen the first video 6 00:00:16,320 --> 00:00:17,880 on word-based tokenization 7 00:00:17,880 --> 00:00:21,450 we recommend you check it out before looking at this video. 8 00:00:21,450 --> 00:00:24,250 Okay, let's take a look at character-based tokenization. 9 00:00:25,650 --> 00:00:28,560 We now split our text into individual characters, 10 00:00:28,560 --> 00:00:29,673 rather than words. 11 00:00:32,850 --> 00:00:35,550 There are generally a lot of different words in languages, 12 00:00:35,550 --> 00:00:37,743 while the number of characters stays low. 13 00:00:38,610 --> 00:00:41,313 To begin let's take a look at the English language, 14 00:00:42,210 --> 00:00:45,540 it has an estimated 170,000 different words, 15 00:00:45,540 --> 00:00:47,730 so we would need a very large vocabulary 16 00:00:47,730 --> 00:00:49,413 to encompass all words. 17 00:00:50,280 --> 00:00:52,200 With a character-based vocabulary, 18 00:00:52,200 --> 00:00:55,440 we can get by with only 256 characters, 19 00:00:55,440 --> 00:00:58,683 which includes letters, numbers and special characters. 20 00:00:59,760 --> 00:01:02,190 Even languages with a lot of different characters 21 00:01:02,190 --> 00:01:04,800 like the Chinese languages can have dictionaries 22 00:01:04,800 --> 00:01:08,130 with up to 20,000 different characters 23 00:01:08,130 --> 00:01:11,523 but more than 375,000 different words. 24 00:01:12,480 --> 00:01:14,310 So character-based vocabularies 25 00:01:14,310 --> 00:01:16,293 let us use fewer different tokens 26 00:01:16,293 --> 00:01:19,050 than the word-based tokenization dictionaries 27 00:01:19,050 --> 00:01:20,523 we would otherwise use. 28 00:01:23,250 --> 00:01:25,830 These vocabularies are also more complete 29 00:01:25,830 --> 00:01:28,950 than their word-based vocabularies counterparts. 30 00:01:28,950 --> 00:01:31,410 As our vocabulary contains all characters 31 00:01:31,410 --> 00:01:33,960 used in a language, even words unseen 32 00:01:33,960 --> 00:01:36,990 during the tokenizer training can still be tokenized, 33 00:01:36,990 --> 00:01:39,633 so out-of-vocabulary tokens will be less frequent. 34 00:01:40,680 --> 00:01:42,840 This includes the ability to correctly tokenize 35 00:01:42,840 --> 00:01:45,210 misspelled words, rather than discarding them 36 00:01:45,210 --> 00:01:46,623 as unknown straight away. 37 00:01:48,240 --> 00:01:52,380 However, this algorithm isn't perfect either. 38 00:01:52,380 --> 00:01:54,360 Intuitively, characters do not hold 39 00:01:54,360 --> 00:01:57,990 as much information individually as a word would hold. 40 00:01:57,990 --> 00:02:00,930 For example, "Let's" holds more information 41 00:02:00,930 --> 00:02:03,570 than it's first letter "l". 
42 00:02:03,570 --> 00:02:05,880 Of course, this is not true for all languages, 43 00:02:05,880 --> 00:02:08,880 as some languages like ideogram-based languages 44 00:02:08,880 --> 00:02:11,523 have a lot of information held in single characters, 45 00:02:12,750 --> 00:02:15,360 but for others like roman-based languages, 46 00:02:15,360 --> 00:02:17,760 the model will have to make sense of multiple tokens 47 00:02:17,760 --> 00:02:20,670 at a time to get the information otherwise held 48 00:02:20,670 --> 00:02:21,753 in a single word. 49 00:02:23,760 --> 00:02:27,000 This leads to another issue with character-based tokenizers, 50 00:02:27,000 --> 00:02:29,520 their sequences are translated into very large amount 51 00:02:29,520 --> 00:02:31,593 of tokens to be processed by the model. 52 00:02:33,090 --> 00:02:36,810 And this can have an impact on the size of the context 53 00:02:36,810 --> 00:02:40,020 the model will carry around, and will reduce the size 54 00:02:40,020 --> 00:02:42,030 of the text we can use as input for our model, 55 00:02:42,030 --> 00:02:43,233 which is often limited. 56 00:02:44,100 --> 00:02:46,650 This tokenization, while it has some issues, 57 00:02:46,650 --> 00:02:48,720 has seen some very good results in the past 58 00:02:48,720 --> 00:02:50,490 and so it should be considered when approaching 59 00:02:50,490 --> 00:02:52,680 a new problem as it solves issues 60 00:02:52,680 --> 00:02:54,843 encountered in the word-based algorithm. 61 00:02:56,107 --> 00:02:58,774 (page whirring)
course/subtitles/en/14_character-based-tokenizers.srt/0
{ "file_path": "course/subtitles/en/14_character-based-tokenizers.srt", "repo_id": "course", "token_count": 1801 }
152
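The subtitles above contrast the size of word-level and character-level vocabularies. As a rough illustration (not part of the course materials), the toy Python tokenizer below builds a character vocabulary from a tiny corpus and shows that a word never seen during "training" can still be encoded; the corpus, the reserved unknown id and the helper name are assumptions made for this sketch.

# Toy character-level tokenizer: a vocabulary of a few dozen symbols still covers unseen words.
corpus = ["Let's do tokenization!", "Character vocabularies are small but general."]

# Collect every character seen in the corpus and assign it an id.
vocab = {ch: idx for idx, ch in enumerate(sorted({ch for text in corpus for ch in text}))}

def encode(text):
    unk_id = len(vocab)  # reserved id for characters never seen (rare at character level)
    return [vocab.get(ch, unk_id) for ch in text]

print(len(vocab))      # tens of symbols instead of ~170,000 English words
print(encode("dogs"))  # unseen word, still tokenized character by character

"dogs" maps entirely to known ids because each of its characters already appears in the corpus, which is the completeness argument made in the video.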
1 00:00:00,225 --> 00:00:02,892 (air whooshing) 2 00:00:05,460 --> 00:00:07,470 - Supercharge your PyTorch training loop 3 00:00:07,470 --> 00:00:08,943 with Hugging Face Accelerate. 4 00:00:11,340 --> 00:00:12,600 There are multiple setups 5 00:00:12,600 --> 00:00:14,580 on which you can run your training: 6 00:00:14,580 --> 00:00:17,910 it could be on CPU, GPUs, TPUs, 7 00:00:17,910 --> 00:00:20,610 distributed on one machine with several devices, 8 00:00:20,610 --> 00:00:23,220 or even several machines, often called nodes, 9 00:00:23,220 --> 00:00:25,173 each with multiple devices. 10 00:00:26,340 --> 00:00:28,200 On top of that, there are new tweaks 11 00:00:28,200 --> 00:00:30,810 to make your training faster or more efficient, 12 00:00:30,810 --> 00:00:32,763 like mixed precision and DeepSpeed. 13 00:00:33,840 --> 00:00:36,600 Each of those setups or training tweaks 14 00:00:36,600 --> 00:00:38,760 requires you to change the code of your training loop 15 00:00:38,760 --> 00:00:41,733 in one way or another and to learn a new API. 16 00:00:43,260 --> 00:00:45,940 All those setups are handled by the Trainer API, 17 00:00:45,940 --> 00:00:49,590 and there are several third-party libraries that can help. 18 00:00:49,590 --> 00:00:50,760 The problem with those 19 00:00:50,760 --> 00:00:53,100 is that they can feel like a black box 20 00:00:53,100 --> 00:00:55,320 and that it might not be easy to implement the tweak 21 00:00:55,320 --> 00:00:56,820 to the training loop you need. 22 00:00:57,840 --> 00:00:59,760 Accelerate has been designed specifically 23 00:00:59,760 --> 00:01:02,790 to let you retain full control over your training loop 24 00:01:02,790 --> 00:01:04,833 and be as non-intrusive as possible. 25 00:01:05,760 --> 00:01:08,760 With just four lines of code to add to your training loop, 26 00:01:08,760 --> 00:01:11,733 here shown on the example of the training loop video, 27 00:01:12,630 --> 00:01:14,730 Accelerate will handle all the setups 28 00:01:14,730 --> 00:01:17,180 and training tweaks mentioned on the first slide. 29 00:01:18,630 --> 00:01:20,400 It's only one API to learn and master 30 00:01:20,400 --> 00:01:21,933 instead of 10 different ones. 31 00:01:23,340 --> 00:01:25,980 More specifically, you have to import and instantiate 32 00:01:25,980 --> 00:01:27,360 an accelerator object, 33 00:01:27,360 --> 00:01:29,100 that will handle all the necessary code 34 00:01:29,100 --> 00:01:30,300 for your specific setup. 35 00:01:31,380 --> 00:01:33,780 Then you have to send it the model, 36 00:01:33,780 --> 00:01:36,000 optimizer and dataloaders you are using 37 00:01:36,000 --> 00:01:39,633 in the prepare method, which is the main method to remember. 38 00:01:40,860 --> 00:01:42,870 Accelerate handles device placement, 39 00:01:42,870 --> 00:01:44,370 so you don't need to put your batch 40 00:01:44,370 --> 00:01:46,980 on the specific device you are using. 41 00:01:46,980 --> 00:01:50,640 Finally, you have to replace the loss.backward line 42 00:01:50,640 --> 00:01:54,300 by accelerator.backwardloss, 43 00:01:54,300 --> 00:01:55,500 and that's all you need! 44 00:01:58,410 --> 00:02:01,710 Accelerate also handles distributed evaluation. 45 00:02:01,710 --> 00:02:04,020 You can still use a classic evaluation loop 46 00:02:04,020 --> 00:02:06,750 such as the one we saw in the training loop video, 47 00:02:06,750 --> 00:02:08,280 in which case all processes 48 00:02:08,280 --> 00:02:10,083 will perform the full evaluation. 
49 00:02:11,340 --> 00:02:13,530 To use a distributed evaluation, 50 00:02:13,530 --> 00:02:16,380 you just have to adapt your evaluation loop like this: 51 00:02:16,380 --> 00:02:17,657 pass along the evaluation dataloader 52 00:02:17,657 --> 00:02:21,093 to the accelerator.prepare method, like for training. 53 00:02:22,170 --> 00:02:23,430 Then you can dismiss the line 54 00:02:23,430 --> 00:02:26,160 that places the batch on the proper device, 55 00:02:26,160 --> 00:02:27,870 and just before passing your predictions 56 00:02:27,870 --> 00:02:31,110 and labels to your metric, use accelerator.gather 57 00:02:31,110 --> 00:02:33,300 to gather together the predictions 58 00:02:33,300 --> 00:02:34,803 and labels from each process. 59 00:02:36,420 --> 00:02:37,890 A distributed training script 60 00:02:37,890 --> 00:02:41,040 has to be launched several times on different processes, 61 00:02:41,040 --> 00:02:43,203 for instance, one per GPU you are using. 62 00:02:44,070 --> 00:02:46,350 You can use the PyTorch tools to do that 63 00:02:46,350 --> 00:02:48,210 if you are familiar with them, 64 00:02:48,210 --> 00:02:50,520 but Accelerate also provides an easy API 65 00:02:50,520 --> 00:02:53,523 to configure your setup and launch your training script. 66 00:02:54,540 --> 00:02:57,270 In a terminal, run accelerate config 67 00:02:57,270 --> 00:02:58,650 and answer the small questionnaire 68 00:02:58,650 --> 00:03:00,330 to generate a configuration file 69 00:03:00,330 --> 00:03:02,073 with all the relevant information, 70 00:03:03,240 --> 00:03:05,790 then you can just run accelerate launch, 71 00:03:05,790 --> 00:03:08,580 followed by the path to your training script. 72 00:03:08,580 --> 00:03:12,000 In a notebook, you can use the notebook launcher function 73 00:03:12,000 --> 00:03:13,233 to launch your training. 74 00:03:15,186 --> 00:03:17,853 (air whooshing)
course/subtitles/en/30_supercharge-your-pytorch-training-loop-with-accelerate.srt/0
{ "file_path": "course/subtitles/en/30_supercharge-your-pytorch-training-loop-with-accelerate.srt", "repo_id": "course", "token_count": 2131 }
153
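The subtitles above describe the four-line Accelerate integration: create an Accelerator, call prepare on the model, optimizer and dataloaders, and replace loss.backward() with accelerator.backward(loss). The sketch below applies those lines to a deliberately tiny PyTorch model so it stays self-contained; the linear model, the random data and the hyperparameters are placeholders, not from the video.

import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

# Dummy model and data standing in for the Transformer fine-tuning shown in the video.
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
train_dataloader = DataLoader(dataset, batch_size=8, shuffle=True)

accelerator = Accelerator()                                  # 1. instantiate the accelerator
model, optimizer, train_dataloader = accelerator.prepare(    # 2. prepare model, optimizer, dataloaders
    model, optimizer, train_dataloader
)

for inputs, targets in train_dataloader:                     # 3. no manual .to(device): Accelerate places batches
    loss = torch.nn.functional.mse_loss(model(inputs), targets)
    accelerator.backward(loss)                               # 4. replaces loss.backward()
    optimizer.step()
    optimizer.zero_grad()

For distributed evaluation, predictions and labels would additionally be passed through accelerator.gather() before being fed to a metric, and the script would be started with accelerate config followed by accelerate launch, as the video explains.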
1 00:00:00,180 --> 00:00:03,013 (whooshing sound) 2 00:00:05,310 --> 00:00:06,143 - Let's have a look 3 00:00:06,143 --> 00:00:08,133 inside the token classification pipeline. 4 00:00:09,780 --> 00:00:11,430 In the pipeline video, 5 00:00:11,430 --> 00:00:13,230 we looked at the different applications 6 00:00:13,230 --> 00:00:16,050 the Transformers library supports out of the box. 7 00:00:16,050 --> 00:00:18,660 One of them being token classification. 8 00:00:18,660 --> 00:00:22,050 For instance, predicting for each word in a sentence, 9 00:00:22,050 --> 00:00:23,790 whether they correspond to a person, 10 00:00:23,790 --> 00:00:26,043 an organization, or location. 11 00:00:27,690 --> 00:00:29,250 We can even group together the tokens 12 00:00:29,250 --> 00:00:31,320 corresponding to the same entity. 13 00:00:31,320 --> 00:00:34,890 For instance, all the tokens that form the word Sylvain here 14 00:00:34,890 --> 00:00:36,423 or Hugging and Face. 15 00:00:37,320 --> 00:00:39,720 So, token classification pipeline 16 00:00:39,720 --> 00:00:42,480 works the same way as a text classification pipeline 17 00:00:42,480 --> 00:00:44,910 we studied in a previous video. 18 00:00:44,910 --> 00:00:46,500 There are three steps. 19 00:00:46,500 --> 00:00:50,043 Tokenization, the model, and the post processing. 20 00:00:51,690 --> 00:00:53,190 The first two steps are identical 21 00:00:53,190 --> 00:00:55,230 to the text classification pipeline, 22 00:00:55,230 --> 00:00:58,230 except we use an auto token classification model 23 00:00:58,230 --> 00:01:00,303 instead of a sequence classification one. 24 00:01:01,560 --> 00:01:04,593 We tokenize our text, then feed it to the model. 25 00:01:05,580 --> 00:01:08,160 Instead of getting one number for each possible level 26 00:01:08,160 --> 00:01:09,600 for the whole sentence, 27 00:01:09,600 --> 00:01:12,270 we get one number for each of the possible nine levels 28 00:01:12,270 --> 00:01:14,250 for every token in the sentence. 29 00:01:14,250 --> 00:01:15,573 Here, 19. 30 00:01:17,070 --> 00:01:19,710 Like all the other models of the Transformers library, 31 00:01:19,710 --> 00:01:22,560 our model outputs logits which we need to turn 32 00:01:22,560 --> 00:01:24,663 into predictions by using a SoftMax. 33 00:01:25,830 --> 00:01:28,170 We also get the predicted label for each token 34 00:01:28,170 --> 00:01:30,063 by taking the maximum prediction. 35 00:01:31,080 --> 00:01:33,540 Since the softmax function preserves the order, 36 00:01:33,540 --> 00:01:34,980 we could have done it on the logits 37 00:01:34,980 --> 00:01:36,830 if we had no need of the predictions. 38 00:01:37,680 --> 00:01:40,050 The model config contains the label mapping 39 00:01:40,050 --> 00:01:42,090 in its id2label field. 40 00:01:42,090 --> 00:01:45,600 Using it, we can map every token to its corresponding label. 41 00:01:45,600 --> 00:01:48,630 The label O corresponds to "no entity" 42 00:01:48,630 --> 00:01:50,460 which is why we didn't see it in our results 43 00:01:50,460 --> 00:01:52,110 in the first slide. 44 00:01:52,110 --> 00:01:54,150 On top of the label and the probability, 45 00:01:54,150 --> 00:01:55,620 those results included the start 46 00:01:55,620 --> 00:01:57,423 and end character in the sentence. 47 00:01:58,294 --> 00:01:59,880 We'll need to use the offset mapping 48 00:01:59,880 --> 00:02:01,110 of the tokenizer to get those. 49 00:02:01,110 --> 00:02:03,090 Look at the video link below 50 00:02:03,090 --> 00:02:05,340 if you don't know about them already. 
51 00:02:05,340 --> 00:02:06,990 Then, looping through each token 52 00:02:06,990 --> 00:02:09,090 that has a label distinct from O, 53 00:02:09,090 --> 00:02:10,590 we can build the list of results 54 00:02:10,590 --> 00:02:12,140 we got with our first pipeline. 55 00:02:13,650 --> 00:02:15,840 The last step is to group together tokens 56 00:02:15,840 --> 00:02:17,640 that corresponds to the same entity. 57 00:02:18,930 --> 00:02:21,540 This is why we had two labels for each type of entity, 58 00:02:21,540 --> 00:02:23,940 I-PER and B-PER for instance. 59 00:02:23,940 --> 00:02:25,530 It allows us to know if a token 60 00:02:25,530 --> 00:02:27,603 is in the same entity as a previous one. 61 00:02:28,620 --> 00:02:29,850 Note that there are two ways 62 00:02:29,850 --> 00:02:32,490 of labeling used for token classification. 63 00:02:32,490 --> 00:02:35,360 One, in pink here, uses the B-PER label 64 00:02:35,360 --> 00:02:37,530 at the beginning of each new entity. 65 00:02:37,530 --> 00:02:39,990 But the other in blue only uses it 66 00:02:39,990 --> 00:02:42,933 to separate two adjacent entities of the same types. 67 00:02:44,340 --> 00:02:46,560 In both cases we can flag a new entity 68 00:02:46,560 --> 00:02:49,110 each time we see a new label appearing, 69 00:02:49,110 --> 00:02:51,330 either with the I or B prefix. 70 00:02:51,330 --> 00:02:53,850 Then, take all the following tokens labeled the same 71 00:02:53,850 --> 00:02:55,470 with an I-flag. 72 00:02:55,470 --> 00:02:57,000 This, coupled with the offset mapping 73 00:02:57,000 --> 00:02:59,010 to get the start and end characters 74 00:02:59,010 --> 00:03:01,560 allows us to get the span of texts for each entity. 75 00:03:02,869 --> 00:03:05,702 (whooshing sound)
course/subtitles/en/46_inside-the-token-classification-pipeline-(tensorflow).srt/0
{ "file_path": "course/subtitles/en/46_inside-the-token-classification-pipeline-(tensorflow).srt", "repo_id": "course", "token_count": 2132 }
154
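The subtitles above walk through the token classification pipeline step by step: tokenize, run the model, apply a softmax and an argmax, then map ids to labels with the model config. A condensed TensorFlow sketch of those steps follows; the NER checkpoint and the example sentence are assumptions chosen for illustration, not taken from the video.

import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForTokenClassification

checkpoint = "dbmdz/bert-large-cased-finetuned-conll03-english"  # any token-classification model works
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = TFAutoModelForTokenClassification.from_pretrained(checkpoint)

inputs = tokenizer("My name is Sylvain and I work at Hugging Face in Brooklyn.", return_tensors="tf")
logits = model(**inputs).logits                      # shape: (1, number_of_tokens, number_of_labels)

probabilities = tf.math.softmax(logits, axis=-1)     # logits -> probabilities
predictions = tf.math.argmax(logits, axis=-1)[0]     # predicted label id for each token

# Map ids to label names via the config, skipping the "O" (no entity) label.
for token, label_id in zip(inputs.tokens(), predictions.numpy()):
    label = model.config.id2label[int(label_id)]
    if label != "O":
        print(token, label)

Grouping the tokens of one entity together and recovering character spans would additionally use the tokenizer's offset mapping, as described at the end of the transcript.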
1 00:00:00,624 --> 00:00:03,374 (logo whooshing) 2 00:00:05,700 --> 00:00:07,740 - What is the ROUGE metric? 3 00:00:07,740 --> 00:00:08,880 For many NLP tasks 4 00:00:08,880 --> 00:00:12,270 we can use common metrics like accuracy or F1 score. 5 00:00:12,270 --> 00:00:13,650 But what do you do when you wanna measure something 6 00:00:13,650 --> 00:00:16,920 like the quality of a summary from a model like T5? 7 00:00:16,920 --> 00:00:18,180 In this video, we'll take a look 8 00:00:18,180 --> 00:00:21,180 at a widely used metric for text summarization called ROUGE. 9 00:00:22,740 --> 00:00:24,660 There are actually several variants of ROUGE 10 00:00:24,660 --> 00:00:26,190 but the basic idea behind all of them 11 00:00:26,190 --> 00:00:27,840 is to assign a single numerical score 12 00:00:27,840 --> 00:00:30,000 to a summary that tells us how good it is 13 00:00:30,000 --> 00:00:32,774 compared to one or more reference summaries. 14 00:00:32,774 --> 00:00:34,020 In this example, we have a book review 15 00:00:34,020 --> 00:00:36,570 that has been summarized by some model. 16 00:00:36,570 --> 00:00:38,320 If we compare the generated summary 17 00:00:39,168 --> 00:00:40,260 to some reference human summaries, we can see 18 00:00:40,260 --> 00:00:42,841 that the model is actually pretty good 19 00:00:42,841 --> 00:00:44,063 and only differs by a word or two. 20 00:00:45,060 --> 00:00:46,260 So how can we measure the quality 21 00:00:46,260 --> 00:00:49,050 of a generated summary in an automatic way? 22 00:00:49,050 --> 00:00:51,510 The approach that ROUGE takes is to compare the n-grams 23 00:00:51,510 --> 00:00:55,200 of the generated summary to the n-grams of the references. 24 00:00:55,200 --> 00:00:58,590 And n-gram is just a fancy way of saying a chunk of N words. 25 00:00:58,590 --> 00:01:00,030 So let's start with unigrams 26 00:01:00,030 --> 00:01:02,780 which correspond to the individual words in a sentence. 27 00:01:03,780 --> 00:01:05,250 In this example, you can see that six 28 00:01:05,250 --> 00:01:07,650 of the words in the generated summary are also found 29 00:01:07,650 --> 00:01:09,420 in one of the reference summaries. 30 00:01:09,420 --> 00:01:11,310 And the rouge metric that compares unigrams 31 00:01:11,310 --> 00:01:12,260 is called ROUGE-1. 32 00:01:14,533 --> 00:01:16,770 Now that we found our matches, one way to assign a score 33 00:01:16,770 --> 00:01:20,280 to the summary is to compute the recall of the unigrams. 34 00:01:20,280 --> 00:01:21,540 This means we just count the number 35 00:01:21,540 --> 00:01:22,950 of matching words in the generated 36 00:01:22,950 --> 00:01:25,290 and reference summaries and normalize the count 37 00:01:25,290 --> 00:01:28,200 by dividing by the number of words in the reference. 38 00:01:28,200 --> 00:01:30,450 In this example, we found six matching words 39 00:01:30,450 --> 00:01:32,160 and our reference has six words. 40 00:01:32,160 --> 00:01:33,933 So our unigram recall is perfect. 41 00:01:34,800 --> 00:01:35,810 This means that all of the words 42 00:01:35,810 --> 00:01:37,500 in the reference summary have been produced 43 00:01:37,500 --> 00:01:38,550 in the generated one. 44 00:01:40,050 --> 00:01:42,360 Now, perfect recall sounds great, but imagine 45 00:01:42,360 --> 00:01:44,520 if our generated summary have been something like 46 00:01:44,520 --> 00:01:45,720 I really, really, really, 47 00:01:45,720 --> 00:01:48,150 really loved reading the Hunger Games. 
48 00:01:48,150 --> 00:01:49,378 This would also have perfect recall 49 00:01:49,378 --> 00:01:51,330 but is arguably a worse summary, 50 00:01:51,330 --> 00:01:52,653 since it is verbose. 51 00:01:53,550 --> 00:01:54,600 To deal with these scenarios, 52 00:01:54,600 --> 00:01:56,190 we can also compute precision, 53 00:01:56,190 --> 00:01:58,380 which in the ROUGE context measures how much 54 00:01:58,380 --> 00:02:00,810 of the generator summary was relevant. 55 00:02:00,810 --> 00:02:03,630 In practice, both precision and recall are usually computed 56 00:02:03,630 --> 00:02:05,493 and then the F1 score is reported. 57 00:02:07,170 --> 00:02:08,542 Now we can change the granularity 58 00:02:08,542 --> 00:02:13,020 of the comparison by comparing bigrams instead of unigrams. 59 00:02:13,020 --> 00:02:15,090 With bigrams, we chunk the sentence into pairs 60 00:02:15,090 --> 00:02:17,910 of consecutive words and then count how many pairs 61 00:02:17,910 --> 00:02:21,360 in the generated summary are present in the reference one. 62 00:02:21,360 --> 00:02:23,880 This gives us ROUGE-2 precision and recall 63 00:02:23,880 --> 00:02:24,780 which as we can see, 64 00:02:24,780 --> 00:02:27,780 is lower than the ROUGE-1 scores from earlier. 65 00:02:27,780 --> 00:02:29,400 Now, if the summaries are long, 66 00:02:29,400 --> 00:02:31,740 the ROUGE-2 scores will generally be small 67 00:02:31,740 --> 00:02:34,290 because there are fewer bios to match. 68 00:02:34,290 --> 00:02:36,870 And this is also true for abstractive summarization. 69 00:02:36,870 --> 00:02:39,993 So both ROUGE-1 and ROUGE-2 scores are usually reported. 70 00:02:42,000 --> 00:02:45,330 The last ROUGE variant we will discuss is ROUGE L. 71 00:02:45,330 --> 00:02:47,160 ROUGE L doesn't compare ngrams 72 00:02:47,160 --> 00:02:49,572 but instead treats each summary as a sequence of words 73 00:02:49,572 --> 00:02:53,403 and then looks for the longest common subsequence or LCS. 74 00:02:54,775 --> 00:02:56,130 A subsequence is a sequence that appears 75 00:02:56,130 --> 00:02:59,760 in the same relative order, but not necessarily contiguous. 76 00:02:59,760 --> 00:03:03,210 So in this example, I loved reading the Hunger Games, 77 00:03:03,210 --> 00:03:06,930 is the longest common subsequence between the two summaries. 78 00:03:06,930 --> 00:03:08,610 And the main advantage of ROUGE L 79 00:03:08,610 --> 00:03:11,670 over ROUGE-1 or ROUGE-2 is that it doesn't depend 80 00:03:11,670 --> 00:03:14,100 on consecutive n-gram matches, and so it tends 81 00:03:14,100 --> 00:03:16,650 to capture sentence structure much more accurately. 82 00:03:18,150 --> 00:03:19,440 Now to compute ROUGE scores 83 00:03:19,440 --> 00:03:21,660 in the data sets library is very simple. 84 00:03:21,660 --> 00:03:23,910 You just use the load_metric function, 85 00:03:23,910 --> 00:03:26,400 provide your model summaries along with the references 86 00:03:26,400 --> 00:03:27,500 and you're good to go. 87 00:03:28,770 --> 00:03:30,120 The output from the calculation 88 00:03:30,120 --> 00:03:31,507 contains a lot of information. 89 00:03:31,507 --> 00:03:34,560 The first thing we can see is that the confidence intervals 90 00:03:34,560 --> 00:03:36,090 of each ROUGE score are provided 91 00:03:36,090 --> 00:03:39,030 in the low, mid and high fields. 92 00:03:39,030 --> 00:03:40,980 This is really useful if you wanna know the spread 93 00:03:40,980 --> 00:03:43,730 of your ROUGE scores when comparing two or more models. 
94 00:03:45,090 --> 00:03:46,050 The second thing to notice 95 00:03:46,050 --> 00:03:48,330 is that we have four types of ROUGE score. 96 00:03:48,330 --> 00:03:51,480 We've already seen ROUGE-1, ROUGE-2 and ROUGE-L. 97 00:03:51,480 --> 00:03:53,760 So what is ROUGE-Lsum? 98 00:03:53,760 --> 00:03:55,410 Well, the sum in ROUGE-Lsum 99 00:03:55,410 --> 00:03:57,630 refers to the fact that this metric is computed 100 00:03:57,630 --> 00:04:00,240 over a whole summary while ROUGE-L is computed 101 00:04:00,240 --> 00:04:02,493 as the average of individual sentences. 102 00:04:04,166 --> 00:04:06,916 (logo whooshing)
course/subtitles/en/62_what-is-the-rouge-metric.srt/0
{ "file_path": "course/subtitles/en/62_what-is-the-rouge-metric.srt", "repo_id": "course", "token_count": 3077 }
155
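Since the subtitles above explain ROUGE in terms of unigram recall and precision and then point to the load_metric function, here is a small sketch of both. The example summaries are invented, the set-based overlap is a simplification that ignores repeated words, and the library call is shown with the newer evaluate package (older course code used datasets.load_metric("rouge")); both variants assume the rouge_score dependency is installed.

generated = "I really really loved reading the Hunger Games"
reference = "I loved reading the Hunger Games"

gen_tokens, ref_tokens = generated.split(), reference.split()
overlap = len(set(gen_tokens) & set(ref_tokens))
recall = overlap / len(ref_tokens)      # matching words / words in the reference
precision = overlap / len(gen_tokens)   # matching words / words in the generated summary
f1 = 2 * precision * recall / (precision + recall)
print(recall, precision, f1)            # perfect recall, lower precision for the verbose summary

import evaluate

rouge = evaluate.load("rouge")
scores = rouge.compute(predictions=[generated], references=[reference])
print(scores)   # rouge1, rouge2, rougeL and rougeLsum

Depending on the library version, the output is either a plain dictionary of scores or the low/mid/high aggregate described in the transcript.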
1 00:00:04,660 --> 00:00:07,589 Welcome to the Hugging Face tasks series! 2 00:00:07,589 --> 00:00:13,730 In this video we’ll take a look at Masked Language Modeling. 3 00:00:13,730 --> 00:00:20,720 Masked language modeling is the task of predicting which words should fill in the blanks of a 4 00:00:20,720 --> 00:00:23,500 sentence. 5 00:00:23,500 --> 00:00:32,870 These models take a masked text as the input and output the possible values for that mask. 6 00:00:32,870 --> 00:00:37,550 Masked language modeling is handy before fine-tuning your model for your task. 7 00:00:37,550 --> 00:00:43,579 For example, if you need to use a model in a specific domain, say, biomedical documents, 8 00:00:43,579 --> 00:00:49,050 models like BERT will treat your domain-specific words as rare tokens. 9 00:00:49,050 --> 00:00:54,220 If you train a masked language model using your biomedical corpus and then fine tune 10 00:00:54,220 --> 00:01:02,929 your model on a downstream task, you will have a better performance. 11 00:01:02,929 --> 00:01:07,799 Classification metrics can’t be used as there’s no single correct answer to mask 12 00:01:07,799 --> 00:01:08,799 values. 13 00:01:08,799 --> 00:01:12,900 Instead, we evaluate the distribution of the mask values. 14 00:01:12,900 --> 00:01:16,590 A common metric to do so is the cross-entropy loss. 15 00:01:16,590 --> 00:01:22,010 Perplexity is also a widely used metric and it is calculated as the exponential of the 16 00:01:22,010 --> 00:01:27,240 cross-entropy loss. 17 00:01:27,240 --> 00:01:35,680 You can use any dataset with plain text and tokenize the text to mask the data. 18 00:01:35,680 --> 00:01:44,710 For more information about the Masked Language Modeling, check out the Hugging Face course.
course/subtitles/en/tasks_03_🤗-tasks-masked-language-modeling.srt/0
{ "file_path": "course/subtitles/en/tasks_03_🤗-tasks-masked-language-modeling.srt", "repo_id": "course", "token_count": 630 }
156
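The subtitles above describe masked language modeling and its evaluation via cross-entropy and perplexity. Below is a small sketch of both ideas; the checkpoint, the masked sentence and the loss value are illustrative assumptions, not taken from the video.

import math
from transformers import pipeline

# Fill-mask: the model proposes values for the masked word.
unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
for prediction in unmasker("Biomedical [MASK] are hard for general-purpose models.", top_k=3):
    print(prediction["token_str"], round(prediction["score"], 3))

# Perplexity is the exponential of the cross-entropy loss, as stated in the video.
cross_entropy_loss = 2.3                 # e.g. an averaged loss reported during training
print(math.exp(cross_entropy_loss))      # the corresponding perplexity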
1 00:00:03,120 --> 00:00:10,240 Jetons un coup d'œil à la tokenisation basée sur les mots. La tokenisation basée sur les mots est l'idée de diviser 2 00:00:10,240 --> 00:00:19,040 le texte brut en mots, en divisant sur des espaces ou d'autres règles spécifiques comme la ponctuation. Dans cet 3 00:00:19,040 --> 00:00:25,040 algorithme, chaque mot est associé à un numéro spécifique, un ID, qui lui est attribué. Dans cet exemple, « Let's » 4 00:00:25,040 --> 00:00:33,120 a l'ID 250, « do » a l'ID 861 et « tokenization! » a l'ID 345. 5 00:00:34,160 --> 00:00:39,840 Cette approche est intéressante, car le modèle a des représentations basées sur des mots entiers. 6 00:00:42,560 --> 00:00:45,680 Les informations contenues dans un seul nombre sont élevées 7 00:00:45,680 --> 00:00:52,880 car un mot contient de nombreuses informations contextuelles et sémantiques dans une phrase. 8 00:00:52,880 --> 00:00:58,720 Cependant, cette approche a ses limites. Par exemple, le mot « dog » et le mot 9 00:00:58,720 --> 00:01:04,320 « dogs » sont très similaires et leur sens est proche. Cependant, la tokenisation basée sur les mots 10 00:01:05,280 --> 00:01:10,320 attribuera des identifiants entièrement différents à ces deux mots, et le modèle apprendra donc 11 00:01:10,320 --> 00:01:14,880 des significations différentes pour ces deux mots. C'est malheureux, car nous aimerions que le 12 00:01:14,880 --> 00:01:21,120 modèle comprenne que ces mots sont en effet liés et que « dogs » est la forme plurielle du mot « dog ». 13 00:01:22,800 --> 00:01:26,400 Un autre problème avec cette approche est qu'il y a beaucoup de mots différents dans une langue. 14 00:01:27,840 --> 00:01:31,920 Si nous voulons que notre modèle comprenne toutes les phrases possibles dans cette langue, 15 00:01:31,920 --> 00:01:37,200 nous aurons besoin d'un identifiant pour chaque mot différent, et le nombre total de mots, 16 00:01:37,200 --> 00:01:41,440 également connu sous le nom de taille du vocabulaire, peut rapidement devenir très important. 17 00:01:44,160 --> 00:01:48,800 C'est un problème car chaque ID est associé à un grand vecteur qui représente la signification du mot. 18 00:01:50,000 --> 00:01:55,840 Et le suivi de ces associations nécessite un nombre énorme de poids lorsque la taille du vocabulaire 19 00:01:55,840 --> 00:02:03,360 est importante. Si nous voulons que nos modèles restent légers, nous pouvons opter pour que notre tokenizer ignore 20 00:02:03,360 --> 00:02:11,760 certains mots dont nous n'avons pas nécessairement besoin. Par exemple ici, lors d'entraînement de notre tokenizer sur un texte, 21 00:02:11,760 --> 00:02:23,520 nous pouvons vouloir prendre les 10 000 mots les plus fréquents dans ce texte pour créer notre vocabulaire de base, au lieu de prendre tous les mots de cette langue. 22 00:02:23,520 --> 00:02:27,200 Le tokenizer saura comment convertir ces 10 000 mots en nombres, 23 00:02:27,200 --> 00:02:33,520 mais tout autre mot sera converti en mot hors vocabulaire ou en « UNKNOWN ». 24 00:02:36,000 --> 00:02:39,760 Cela peut rapidement devenir un problème : le modèle aura exactement la même représentation 25 00:02:39,760 --> 00:02:46,720 pour tous les mots qu'il ne connaît pas, ce qui entraînera de nombreuses pertes d'informations si beaucoup de mots inconnus sont présents.
course/subtitles/fr/13_word-based-tokenizers.srt/0
{ "file_path": "course/subtitles/fr/13_word-based-tokenizers.srt", "repo_id": "course", "token_count": 1354 }
157
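The French subtitles above describe word-based tokenization: splitting on whitespace, giving each word its own id, capping the vocabulary at the most frequent words and mapping everything else to an unknown token. The toy sketch below mirrors those limitations; the corpus, the vocabulary size and the unknown id are assumptions made for the example.

from collections import Counter

corpus = "the dog saw the dog and the dogs saw the dogs".split()

vocab_size = 4                                     # keep only the most frequent words
most_common = [word for word, _ in Counter(corpus).most_common(vocab_size)]
word_to_id = {word: idx for idx, word in enumerate(most_common)}
unk_id = len(word_to_id)                           # every other word collapses to one unknown id

def encode(text):
    return [word_to_id.get(word, unk_id) for word in text.split()]

print(word_to_id)                    # note that "dog" and "dogs" receive unrelated ids
print(encode("the dog saw a cat"))   # "a" and "cat" both become the unknown id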
1 00:00:05,430 --> 00:00:07,240 Écrivez votre propre boucle d'entraînement dans PyTorch. 2 00:00:07,240 --> 00:00:11,759 Dans cette vidéo, nous verrons comment nous pouvons faire le même finetuning que dans la 3 00:00:11,759 --> 00:00:14,120 vidéo sur l'API Trainer, mais sans compter sur cette classe. 4 00:00:14,120 --> 00:00:20,369 De cette façon, vous pourrez facilement personnaliser chaque étape de la boucle d'entraînement en fonction de vos besoins. 5 00:00:20,369 --> 00:00:23,859 Ceci est également très utile pour déboguer manuellement quelque chose qui n'allait pas avec l' 6 00:00:23,859 --> 00:00:26,189 API Trainer. 7 00:00:26,189 --> 00:00:31,200 Avant de nous plonger dans le code, voici une esquisse d'une boucle d'entraînement : nous prenons un batch de 8 00:00:31,200 --> 00:00:33,469 données d'entraînement et le transmettons au modèle. 9 00:00:33,469 --> 00:00:36,600 Avec les étiquettes, nous pouvons alors calculer une perte. 10 00:00:36,600 --> 00:00:41,130 Ce nombre n'est pas utile en soi, mais est utilisé pour calculer les gradients de nos 11 00:00:41,130 --> 00:00:46,750 poids de modèle, c'est-à-dire la dérivée de la perte par rapport à chaque poids de modèle. 12 00:00:46,750 --> 00:00:51,920 Ces gradients sont ensuite utilisés par l'optimiseur pour mettre à jour les poids du modèle et les 13 00:00:51,920 --> 00:00:53,360 améliorer un peu. 14 00:00:53,360 --> 00:00:56,170 Nous répétons ensuite le processus avec un nouveau batch de données d'apprentissage. 15 00:00:56,170 --> 00:01:00,969 Si tout cela n'est pas clair, n'hésitez pas à reprendre votre 16 00:01:00,969 --> 00:01:02,240 cours d'apprentissage profond préféré. 17 00:01:02,240 --> 00:01:07,560 Nous utiliserons à nouveau le jeu de données GLUE MRPC, et nous avons vu comment prétraiter les données 18 00:01:07,560 --> 00:01:10,439 à l'aide de la bibliothèque Datasets avec le rembourrage dynamique. 19 00:01:10,439 --> 00:01:15,549 Découvrez les vidéos liées ci-dessous si vous ne les avez pas déjà vues. 20 00:01:15,549 --> 00:01:20,060 Cela fait, nous n'avons plus qu'à définir les DataLoaders PyTorch, qui seront chargés de 21 00:01:20,060 --> 00:01:24,480 convertir les éléments de notre jeu de données en batchs. 22 00:01:24,480 --> 00:01:33,890 Nous utilisons notre `DataCollatorForPadding` comme fonction d'assemblement et mélangeons l'ensemble d'entraînementpour s'assurer que nous voyons pas tous les échantillons dans le même ordre. 23 00:01:33,890 --> 00:01:39,460 Pour vérifier que tout fonctionne comme prévu, nous essayons de récupérer un batch de données et de l' 24 00:01:39,460 --> 00:01:40,460 inspecter. 25 00:01:40,460 --> 00:01:44,790 Comme nos éléments de jeu de données, c'est un dictionnaire, mais cette fois les valeurs ne sont pas une seule 26 00:01:44,790 --> 00:01:50,460 liste d'entiers, mais un tenseur de forme de taille de batch par longueur de séquence. 27 00:01:50,460 --> 00:01:52,869 L'étape suivante consiste à envoyer les données d'entraînement dans notre modèle. 28 00:01:52,869 --> 00:01:56,790 Pour cela, nous devrons créer notre modèle. 29 00:01:56,790 --> 00:02:01,240 Comme on l'a vu dans la vidéo sur l'API des modèles, nous utilisons la méthode `from_pretrained` et ajustons le nombre 30 00:02:01,240 --> 00:02:06,159 d'étiquettes au nombre de classes que nous avons sur cet jeu de données, ici deux. 
31 00:02:06,159 --> 00:02:11,020 Encore une fois, pour être sûr que tout se passe bien, nous passons le batch que nous avons saisi à notre modèle 32 00:02:11,020 --> 00:02:12,640 et vérifions qu'il n'y a pas d'erreur. 33 00:02:12,640 --> 00:02:17,780 Si les étiquettes sont fournies, les modèles de la bibliothèque Transformers renvoient toujours 34 00:02:17,780 --> 00:02:18,840 directement la perte. 35 00:02:18,840 --> 00:02:24,129 Nous pourrons faire `loss.backward()` pour calculer tous les gradients, et nous aurons alors besoin d'un optimiseur 36 00:02:24,129 --> 00:02:26,480 pour faire l'étape d'apprentissage. 37 00:02:26,480 --> 00:02:30,800 Nous utilisons ici l'optimiseur AdamW, qui est une variante d'Adam avec une décroissance de poids appropriée, 38 00:02:30,800 --> 00:02:35,040 mais vous pouvez choisir n'importe quel optimiseur PyTorch que vous aimez. 39 00:02:35,040 --> 00:02:39,519 En utilisant la perte précédente et en calculant les gradients avec `loss.backward()`, nous vérifions que 40 00:02:39,519 --> 00:02:43,510 nous pouvons faire l'étape d'optimisation sans aucune erreur. 41 00:02:43,510 --> 00:02:47,580 N'oubliez pas de mettre `zero_grad()` par la suite, sinon à l'étape suivante, ils seront ajoutés 42 00:02:47,580 --> 00:02:49,659 aux gradients que vous calculez ! 43 00:02:49,659 --> 00:02:53,620 Nous pourrions déjà écrire notre boucle d'entraînement, mais nous ajouterons deux autres choses pour la rendre 44 00:02:53,620 --> 00:02:55,590 aussi bonne que possible. 45 00:02:55,590 --> 00:03:01,150 Le premier est un planificateur de taux d'apprentissage, pour réduire progressivement notre taux d'apprentissage à 46 00:03:01,150 --> 00:03:02,150 0. 47 00:03:02,150 --> 00:03:06,180 La fonction `get_scheduler` de la bibliothèque Transformers n'est qu'une fonction pratique pour 48 00:03:06,180 --> 00:03:12,760 créer facilement un tel planificateur0 Vous pouvez à nouveau utiliser n'importe quel planificateur de taux d'apprentissage PyTorch à la place. 49 00:03:12,760 --> 00:03:17,299 Enfin, si nous voulons que notre entraînement prenne quelques minutes au lieu de quelques heures, 50 00:03:17,299 --> 00:03:19,580 nous devrons utiliser un GPU. 51 00:03:19,580 --> 00:03:24,340 La première étape consiste à en obtenir un, par exemple en utilisant un notebook Colab. 52 00:03:24,340 --> 00:03:29,090 Ensuite, vous devez réellement envoyer votre modèle et les données d'entraînement à l'aide d'un appareil Torch. 53 00:03:29,090 --> 00:03:35,659 Vérifiez les lignes suivantes pour avoir un appareil CUDA ou préparez vous à ce que votre entraînement dure plus d'une heure. 54 00:03:35,659 --> 00:03:38,450 Nous pouvons maintenant tout assembler ! 55 00:03:38,450 --> 00:03:42,470 Nous mettons d'abord notre modèle en mode d'entraînement (ce qui activera le comportement d'entraînement pour certaines 56 00:03:42,470 --> 00:03:47,900 couches comme la Dropout), puis parcourons le nombre d'époques que nous avons sélectionnées et toutes les données dans 57 00:03:47,900 --> 00:03:50,130 notre chargeur de données d'entraînement. 58 00:03:50,130 --> 00:03:54,560 Ensuite, nous passons par toutes les étapes que nous avons déjà vues : envoyer les données au GPU, calculer 59 00:03:54,560 --> 00:03:57,870 les sorties du modèle, et en particulier la perte. 60 00:03:57,870 --> 00:04:02,040 Utilisez la perte pour calculer les gradients, puis effectuez une étape d'apprentissage avec l'optimiseur. 
61 00:04:02,040 --> 00:04:06,760 Mettez à jour le taux d'apprentissage dans notre planificateur pour la prochaine itération et mettez à 0 les gradients 62 00:04:06,760 --> 00:04:09,340 de l'optimiseur. 63 00:04:09,340 --> 00:04:13,590 Une fois cela terminé, nous pouvons évaluer notre modèle très facilement avec une métrique de la 64 00:04:13,590 --> 00:04:14,730 bibliothèque Datasets. 65 00:04:14,730 --> 00:04:22,470 Nous mettons d'abord notre modèle en mode d'évaluation, puis parcourons toutes les données dans le 66 00:04:22,470 --> 00:04:23,900 chargeur de données d'évaluation. 67 00:04:23,900 --> 00:04:27,480 Comme nous l'avons vu dans la vidéo sur l'API Trainer, le modèle génère des logits et nous devons appliquer 68 00:04:27,480 --> 00:04:31,350 la fonction argmax pour les convertir en prédictions. 69 00:04:31,350 --> 00:04:36,910 L'objet `metric` a alors une méthode `add_batch` que nous pouvons utiliser pour lui envoyer ces prédictions intermédiaires. 70 00:04:36,910 --> 00:04:40,590 Une fois la boucle d'évaluation terminée, nous n'avons plus qu'à appeler la méthode de calcul pour obtenir nos 71 00:04:40,590 --> 00:04:41,620 résultats finaux ! 72 00:04:41,620 --> 00:04:50,760 Félicitations, vous avez maintenant finetuné un modèle tout seul !
course/subtitles/fr/29_write-your-training-loop-in-pytorch.srt/0
{ "file_path": "course/subtitles/fr/29_write-your-training-loop-in-pytorch.srt", "repo_id": "course", "token_count": 3493 }
158
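The French subtitles above describe a complete PyTorch training loop for BERT on GLUE MRPC: dynamic padding with a data collator, an AdamW optimizer, a linear learning-rate scheduler, device placement, the backward/step/zero_grad cycle and an evaluation loop that feeds batches to a metric. The condensed sketch below follows that outline; the checkpoint, batch size, learning rate and epoch count are assumptions in the spirit of the course defaults, and the metric is loaded with the newer evaluate package rather than datasets.load_metric.

import torch
from torch.utils.data import DataLoader
from datasets import load_dataset
import evaluate
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          DataCollatorWithPadding, get_scheduler)

checkpoint = "bert-base-uncased"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)

raw_datasets = load_dataset("glue", "mrpc")
def tokenize(batch):
    return tokenizer(batch["sentence1"], batch["sentence2"], truncation=True)
tokenized = raw_datasets.map(tokenize, batched=True)
tokenized = tokenized.remove_columns(["sentence1", "sentence2", "idx"]).rename_column("label", "labels")
tokenized.set_format("torch")

collator = DataCollatorWithPadding(tokenizer=tokenizer)           # dynamic padding per batch
train_dl = DataLoader(tokenized["train"], batch_size=8, shuffle=True, collate_fn=collator)
eval_dl = DataLoader(tokenized["validation"], batch_size=8, collate_fn=collator)

model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
optimizer = torch.optim.AdamW(model.parameters(), lr=5e-5)
num_epochs = 3
num_training_steps = num_epochs * len(train_dl)
scheduler = get_scheduler("linear", optimizer, num_warmup_steps=0, num_training_steps=num_training_steps)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)

model.train()
for epoch in range(num_epochs):
    for batch in train_dl:
        batch = {k: v.to(device) for k, v in batch.items()}
        loss = model(**batch).loss        # the model returns the loss when labels are provided
        loss.backward()
        optimizer.step()
        scheduler.step()
        optimizer.zero_grad()

# Evaluation loop: accumulate predictions batch by batch, then compute the metric once.
metric = evaluate.load("glue", "mrpc")
model.eval()
for batch in eval_dl:
    batch = {k: v.to(device) for k, v in batch.items()}
    with torch.no_grad():
        logits = model(**batch).logits
    metric.add_batch(predictions=torch.argmax(logits, dim=-1), references=batch["labels"])
print(metric.compute())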
1 00:00:05,200 --> 00:00:08,080 Examinons le pipeline de classification de tokens. 2 00:00:10,000 --> 00:00:13,920 Dans la vidéo sur le pipeline, nous avons examiné les différentes applications prêtes à l'emploi 3 00:00:13,920 --> 00:00:19,840 prises en charge par la bibliothèque Transformers, l'une d'entre elles étant la classification de tokens. Par exemple en prédisant pour 4 00:00:19,840 --> 00:00:24,960 chaque mot d'une phrase s'il correspond à une personne, une organisation ou un lieu. 5 00:00:26,400 --> 00:00:30,240 On peut même regrouper les tokens correspondant à une même entité, 6 00:00:30,240 --> 00:00:34,960 par exemple tous les tokens qui ont formé ici le mot « Sylvain », ou « Hugging » et « Face ». 7 00:00:36,960 --> 00:00:42,480 Le pipeline de classification de tokens fonctionne de la même manière que le pipeline de classification de texte que nous avons étudié 8 00:00:42,480 --> 00:00:49,360 dans une vidéo précédente. Il y a trois étapes : la tokenisation, le modèle et le post-traitement. 9 00:00:50,720 --> 00:00:55,680 Les deux premières étapes sont identiques au pipeline de classification de texte, sauf que nous utilisons un 10 00:00:55,680 --> 00:01:01,760 modèle de classification de token automatique au lieu d'un modèle de classification de séquence. Nous tokenisons notre texte, puis nous 11 00:01:01,760 --> 00:01:07,360 le donnons au modèle. Au lieu d'obtenir un numéro pour chaque étiquette possible pour la phrase entière, 12 00:01:07,360 --> 00:01:13,760 nous obtenons un numéro pour chacune des 9 étiquettes possibles pour chaque token de la phrase, ici 19. 13 00:01:15,120 --> 00:01:19,600 Comme tous les autres modèles de la bibliothèque Transformers, notre modèle génère des logits, 14 00:01:19,600 --> 00:01:26,160 que nous transformons en prédictions en utilisant une SoftMax. Nous obtenons également l'étiquette prédite pour chaque token en 15 00:01:26,160 --> 00:01:30,000 prenant la prédiction maximale (puisque la fonction softmax préserve l'ordre, nous aurions pu le 16 00:01:30,000 --> 00:01:35,200 faire sur les logits si nous n'avions pas besoin des prédictions). La configuration du modèle contient 17 00:01:35,200 --> 00:01:41,200 l'application des étiquettes dans son champ `id2label`. En l'utilisant, nous pouvons associer chaque token à son étiquette correspondante. Le 18 00:01:41,200 --> 00:01:46,400 L'étiquette `O` correspond à « aucune entité », c'est pourquoi nous ne l'avons pas vu dans nos résultats de la première diapositive. 19 00:01:47,040 --> 00:01:51,360 En plus de l'étiquette et de la probabilité, ces résultats incluaient le caractère de début et de 20 00:01:51,360 --> 00:01:56,960 fin dans la phrase. Nous devrons utiliser l'association de décalage du tokenizer pour les obtenir 21 00:01:56,960 --> 00:02:02,080 (regardez la vidéo en lien ci-dessous si vous ne les connaissez pas déjà). Ensuite, en parcourant chaque 22 00:02:02,080 --> 00:02:08,240 token ayant une étiquette distincte de `O`, nous pouvons créer la liste des résultats que nous avons obtenus avec notre premier pipeline. 23 00:02:08,240 --> 00:02:13,360 La dernière étape consiste à regrouper les tokens correspondant à la même entité. 24 00:02:13,360 --> 00:02:17,680 C'est pourquoi nous avions deux étiquettes pour chaque type d'entité : `I-PER` et `B-PER` par exemple. 25 00:02:18,240 --> 00:02:21,840 Cela nous permet de savoir si un token est dans la même entité que le précédent. 
26 00:02:23,120 --> 00:02:26,720 Notez qu'il existe deux manières d'étiqueter utilisées pour la classification des tokens, 27 00:02:26,720 --> 00:02:31,680 l'une (en rose ici) utilise l'étiquette `B-PER` au début de chaque nouvelle entité, mais l'autre 28 00:02:31,680 --> 00:02:38,320 (en bleu) ne l'utilise que pour séparer deux entités adjacentes du même type. Dans les deux cas, nous pouvons 29 00:02:38,320 --> 00:02:44,720 marquer une nouvelle entité chaque fois que nous voyons apparaître une nouvelle étiquette (avec le préfixe `I` ou `B`), puis 30 00:02:44,720 --> 00:02:50,160 prendre tous les tokens suivants étiquetés de la même manière, avec un drapeau `I`. Ceci, associé 31 00:02:50,160 --> 00:03:01,040 mapl'association de décalage pour obtenir les caractères de début et de fin, nous permet d'obtenir l'étendue des textes pour chaque entité.
course/subtitles/fr/45_inside-the-token-classification-pipeline-(pytorch).srt/0
{ "file_path": "course/subtitles/fr/45_inside-the-token-classification-pipeline-(pytorch).srt", "repo_id": "course", "token_count": 1704 }
159
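The French subtitles above detail the same tokenize / model / post-process steps for the PyTorch token classification pipeline, ending with the grouping of B-/I- labelled tokens into entities. Rather than repeating the step-by-step sketch, the snippet below shows the high-level pipeline doing all of it, including the grouping; no checkpoint is specified, so a default NER model is downloaded, and aggregation_strategy="simple" is the current spelling of the grouped_entities=True option used in older course code.

from transformers import pipeline

ner = pipeline("token-classification", aggregation_strategy="simple")
for entity in ner("My name is Sylvain and I work at Hugging Face in Brooklyn."):
    # Each result carries the grouped entity, its character span in the original string and a score.
    print(entity["entity_group"], entity["word"], entity["start"], entity["end"], float(entity["score"]))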
1 00:00:05,360 --> 00:00:10,720 Voyons comment prétraiter un jeu de données pour le résumé de texte. C'est la tâche de 2 00:00:10,720 --> 00:00:16,976 bien résumer un long document. Cette vidéo se concentrera sur la façon de prétraiter votre jeu de données une fois que vous 3 00:00:16,976 --> 00:00:21,840 avez réussi à le mettre dans le format suivant : une colonne pour les documents longs et une pour 4 00:00:21,840 --> 00:00:27,360 les résumés. Voici comment nous pouvons y parvenir avec la bibliothèque Datasets sur le jeu de données XSUM. 5 00:00:28,400 --> 00:00:32,400 Tant que vous parvenez à faire en sorte que vos données ressemblent à ceci, vous devriez pouvoir suivre les mêmes étapes. 6 00:00:33,520 --> 00:00:37,280 Pour une fois, nos étiquettes ne sont pas des entiers correspondant à certaines classes, 7 00:00:37,280 --> 00:00:43,120 mais du texte brut. Nous devrons donc les tokeniser, comme nos entrées. Il y a cependant un petit piège 8 00:00:43,120 --> 00:00:47,760 car nous devons tokeniser nos cibles dans le gestionnaire de contexte `as_target_tokenzier`. 9 00:00:48,480 --> 00:00:53,200 En effet, les tokens spéciaux que nous ajoutons peuvent être légèrement différents pour les entrées et les cibles. 10 00:00:53,760 --> 00:00:58,320 Le tokenizer doit donc savoir lequel il traite. Le traitement du jeu de données 11 00:00:58,320 --> 00:01:03,520 est alors très facile avec la fonction `map`. Les résumés étant généralement beaucoup plus courts que les 12 00:01:03,520 --> 00:01:07,840 documents, vous devez absolument choisir des longueurs maximales différentes pour les entrées et les cibles. 13 00:01:08,640 --> 00:01:12,640 À ce stade, vous pouvez choisir de rembourrer cette longueur maximale en définissant `padding=max_length`. 14 00:01:13,840 --> 00:01:17,360 Ici, nous allons vous montrer comment rembourrer dynamiquement car cela nécessite une étape supplémentaire. 15 00:01:18,640 --> 00:01:23,360 Vos entrées et cibles sont toutes des phrases de différentes longueurs. Nous rembourrons les entrées et les 16 00:01:23,360 --> 00:01:27,920 cibles séparément car la longueur maximale des entrées et des cibles est complètement différente. 17 00:01:28,880 --> 00:01:32,320 Ensuite, nous rembourrons les entrées aux longueurs maximales parmi les entrées, 18 00:01:32,320 --> 00:01:38,800 et de même pour les cibles. Nous rembourrons les entrées avec le token <pad> et les cibles avec l'indice -100 19 00:01:38,800 --> 00:01:44,400 pour nous assurer qu'elles ne sont pas prises en compte dans le calcul de la perte. La bibliothèque Transformers 20 00:01:44,400 --> 00:01:49,200 nous fournit un assembleur de données pour faire tout cela automatiquement. Vous pouvez ensuite le transmettre 21 00:01:49,200 --> 00:01:55,440 au Trainer avec vos jeux de données ou l'utiliser dans la méthode `to_tf_dataset` avant d'utiliser `model.fit()` dans vos modèles en Keras.
course/subtitles/fr/61_data-processing-for-summarization.srt/0
{ "file_path": "course/subtitles/fr/61_data-processing-for-summarization.srt", "repo_id": "course", "token_count": 1172 }
160
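The French subtitles above explain how to preprocess a summarization dataset: tokenize documents and summaries with different maximum lengths, tokenize the targets through the dedicated target-tokenization path, and let a data collator pad inputs with the pad token and labels with -100. The sketch below reproduces those steps on two in-memory examples; the t5-small checkpoint, the length limits and the placeholder texts are assumptions, and text_target= is used where the video's code relies on the as_target_tokenizer context manager.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, DataCollatorForSeq2Seq

checkpoint = "t5-small"                          # any seq2seq checkpoint works for this sketch
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# Stand-ins for the "document" and "summary" columns of a dataset like XSUM.
documents = ["A very long article about something newsworthy ...", "Another long article ..."]
summaries = ["Short summary.", "Another short summary, a little longer."]

max_input_length, max_target_length = 128, 32    # inputs and targets get different limits

features = []
for document, summary in zip(documents, summaries):
    model_inputs = tokenizer(document, max_length=max_input_length, truncation=True)
    labels = tokenizer(text_target=summary, max_length=max_target_length, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    features.append(model_inputs)

# Dynamic padding: input ids get the pad token, labels get -100 so the loss ignores them.
collator = DataCollatorForSeq2Seq(tokenizer=tokenizer, model=model)
batch = collator(features)
print(batch["input_ids"].shape, batch["labels"].shape)
print(batch["labels"])                            # the shorter label row ends in -100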
1 00:00:04,520 --> 00:00:07,400 Bienvenue dans la série d'Hugging Face sur les tâches ! 2 00:00:07,400 --> 00:00:11,870 Dans cette vidéo, nous allons jeter un coup d'œil à la tâche de classification de tokens. 3 00:00:11,870 --> 00:00:17,900 La classification de tokens consiste à attribuer une étiquette à chaque token d'une phrase 4 00:00:17,900 --> 00:00:23,310 Il existe plusieurs tâches de classification de tokens, les plus courantes étant la reconnaissance d’entités nommées 5 00:00:23,310 --> 00:00:26,430 et le « part-of-speech ». 6 00:00:26,430 --> 00:00:31,640 Jetons un coup d'œil rapide à la tâche de reconnaissance d'entités nommées 7 00:00:31,640 --> 00:00:38,400 L'objectif de cette tâche est de trouver les entités dans un texte, comme une personne, un lieu 8 00:00:38,400 --> 00:00:40,210 ou une organisation. 9 00:00:40,210 --> 00:00:45,250 Cette tâche est formulée comme l'étiquetage de chaque token avec une classe pour chaque entité, 10 00:00:45,250 --> 00:00:51,719 et une autre classe pour les tokens qui n'ont pas d'entité. 11 00:00:51,719 --> 00:00:55,670 Une autre tâche de classification de tokens est le « part-of-speech ». 12 00:00:55,670 --> 00:01:01,399 L'objectif de cette tâche est d'étiqueter les mots pour une partie particulière du texte, comme 13 00:01:01,399 --> 00:01:05,900 un nom, un pronom, un adjectif, un verbe, etc. 14 00:01:05,900 --> 00:01:11,270 Cette tâche est formulée comme l'étiquetage de chaque token avec les parties du texte. 15 00:01:11,270 --> 00:01:19,659 Les modèles de classification de tokens sont évalués sur l'exactitude, le rappel, la précision et le score F1. 16 00:01:19,659 --> 00:01:22,950 Les métriques sont calculées pour chacune des classes. 17 00:01:22,950 --> 00:01:28,040 Nous calculons les vrais positifs, les vrais négatifs et les faux positifs pour calculer la précision 18 00:01:28,040 --> 00:01:31,829 et le rappel, et prenons leur moyenne harmonique pour obtenir le score F1. 19 00:01:31,829 --> 00:01:42,329 Ensuite, nous les calculons pour chaque classe et prenons la moyenne globale pour évaluer notre modèle. 20 00:01:42,329 --> 00:01:45,680 Un exemple de jeu de données utilisé pour cette tâche est ConLL2003. 21 00:01:45,680 --> 00:01:51,750 Ici, chaque token appartient à une certaine classe d'entités nommées, désignées par les indices de la 22 00:01:51,750 --> 00:01:55,380 liste contenant les étiquettes. 23 00:01:55,380 --> 00:02:00,720 Vous pouvez extraire des informations importantes de factures à l'aide de modèles de reconnaissance d'entités nommées, 24 00:02:00,720 --> 00:02:07,070 telles que la date, le nom de l'organisation ou l'adresse. 25 00:02:07,070 --> 00:02:16,840 Pour plus d'informations sur la tâche de classification de tokens, consultez le cours d'Hugging Face.
course/subtitles/fr/tasks_00_🤗-tasks-token-classification.srt/0
{ "file_path": "course/subtitles/fr/tasks_00_🤗-tasks-token-classification.srt", "repo_id": "course", "token_count": 1139 }
161
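The French subtitles above describe how token classification models are scored: precision, recall and F1 computed per entity class and then averaged, on datasets such as CoNLL-2003. A small sketch of that evaluation with the seqeval metric follows; the tag sequences are made up, and the call assumes the evaluate and seqeval packages are installed (older course code loaded the same metric through datasets.load_metric).

import evaluate

seqeval = evaluate.load("seqeval")
references = [["B-PER", "I-PER", "O", "B-ORG", "O"]]
predictions = [["B-PER", "I-PER", "O", "O", "O"]]

results = seqeval.compute(predictions=predictions, references=references)
print(results["overall_precision"], results["overall_recall"], results["overall_f1"])
print(results["PER"])   # per-class precision, recall, F1 and support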
1 00:00:00,069 --> 00:00:01,341 (屏幕嗖嗖声) (screen whooshes) 2 00:00:01,341 --> 00:00:02,449 (面部标志呼啸而过) (face logo whooshes) 3 00:00:02,449 --> 00:00:05,880 (屏幕嗖嗖声) (screen whooshes) 4 00:00:05,880 --> 00:00:07,080 - 本节课内容是: pipeline 函数 - The pipeline function. 5 00:00:09,540 --> 00:00:12,020 pipeline 函数是 Transformers 库中的 The pipeline function is the most high level API 6 00:00:12,020 --> 00:00:14,010 最顶层的 API of the Transformers library. 7 00:00:14,010 --> 00:00:16,050 它将所有步骤重新组合在一起 It regroups together all the steps 8 00:00:16,050 --> 00:00:18,873 从而实现从原始文本到可用预测的转换。 to go from raw texts to usable predictions. 9 00:00:20,228 --> 00:00:22,980 使用的模型是 pipeline 的核心, The model used is at the core of a pipeline, 10 00:00:22,980 --> 00:00:24,390 但 pipeline 还包括 but the pipeline also include 11 00:00:24,390 --> 00:00:26,610 所有必要的预处理, all the necessary pre-processing, 12 00:00:26,610 --> 00:00:30,240 因为模型不期望得到文本,而是数字, since the model does not expect texts, but number, 13 00:00:30,240 --> 00:00:32,040 以及一些后期处理, as well as some post-processing, 14 00:00:32,040 --> 00:00:34,533 使模型的输出可读。 to make the output of the model human-readable. 15 00:00:35,910 --> 00:00:37,593 让我们通过一个情绪分析 pipeline Let's look at a first example 16 00:00:37,593 --> 00:00:39,693 的例子来解释一下。 with the sentiment analysis pipeline. 17 00:00:40,740 --> 00:00:44,670 此 pipeline 对给定的输入执行文本分类 This pipeline performs text classification on a given input 18 00:00:44,670 --> 00:00:46,953 并确定它的情绪是正面的还是负面的。 and determines if it's positive or negative. 19 00:00:47,910 --> 00:00:51,750 在这里,它为给定文本标记正面情绪的标签, Here, it attributed the positive label on the given text, 20 00:00:51,750 --> 00:00:54,413 置信度为 95%。 with a confidence of 95%. 21 00:00:55,650 --> 00:00:58,470 您可以将多个文本传递到同一个 pipeline, You can pass multiple texts to the same pipeline, 22 00:00:58,470 --> 00:01:00,270 它们将被处理并 which will be processed and passed 23 00:01:00,270 --> 00:01:02,673 作为一个批次传递给模型。 through the model together as a batch. 24 00:01:03,570 --> 00:01:05,970 输出是包含单个结果的列表 The output is a list of individual results 25 00:01:05,970 --> 00:01:07,923 与输入文本的顺序相同。 in the same order as the input texts. 26 00:01:08,790 --> 00:01:12,270 在这里,我们为第一个文本找到了相同的标签和分数, Here we find the same label and score for the first text, 27 00:01:12,270 --> 00:01:14,443 第二个文本被判断为否定情绪 and the second text is judged negative 28 00:01:14,443 --> 00:01:17,243 置信度为 99.9%。 with a confidence of 99.9%. 29 00:01:18,720 --> 00:01:20,700 零样本分类 pipeline The zero-shot classification pipeline 30 00:01:20,700 --> 00:01:23,610 是一个更通用的文本分类 pipeline, is a more general text-classification pipeline, 31 00:01:23,610 --> 00:01:26,370 它允许您提供所需的标签。 it allows you to provide the labels you want. 32 00:01:26,370 --> 00:01:29,850 在这里,我们想根据标签对输入文本进行分类, Here we want to classify our input text along the labels, 33 00:01:29,850 --> 00:01:32,643 标签有教育、政治和商业。 education, politics, and business. 34 00:01:33,540 --> 00:01:35,580 pipeline 都能够成功识别 The pipeline successfully recognizes 35 00:01:35,580 --> 00:01:38,280 与其他标签相比,它更多地是关于教育, it's more about education than the other labels, 36 00:01:38,280 --> 00:01:40,643 置信度为 84%。 with a confidence of 84%. 37 00:01:41,670 --> 00:01:43,110 继续执行其他任务, Moving on to other tasks, 38 00:01:43,110 --> 00:01:45,030 文本生成 pipeline 将 the text generation pipeline will 39 00:01:45,030 --> 00:01:46,533 自动完成给定的提示。 auto-complete a given prompt. 
40 00:01:47,460 --> 00:01:49,980 输出带有一点随机性, The output is generated with a bit of randomness, 41 00:01:49,980 --> 00:01:52,800 所以每次在给定的提示上调用生成器对象时 so it changes each time you call the generator object 42 00:01:52,800 --> 00:01:53,763 这个结果都会改变。 on a given prompt. 43 00:01:54,990 --> 00:01:57,123 到目前为止,我们已经使用了 pipeline API Up until now, we've used the the pipeline API 44 00:01:57,123 --> 00:02:00,360 结合与每个任务关联的默认模型来演示, with the default model associated to each task, 45 00:02:00,360 --> 00:02:02,880 但您可以将它与任何经过预训练的模型一起使用 but you can use it with any model that has been pretrained 46 00:02:02,880 --> 00:02:04,263 或根据此任务进行微调。 or fine-tuned on this task. 47 00:02:06,540 --> 00:02:10,350 进入模型中心 huggingface.co/models Going on the model hub, huggingface.co/models 48 00:02:10,350 --> 00:02:13,350 您可以按任务过滤可用模型。 you can filter the available models by task. 49 00:02:13,350 --> 00:02:17,190 我们之前示例中使用的默认模型是 gpt2, The default model used in our previous example was gpt2, 50 00:02:17,190 --> 00:02:19,290 但还有更多模型可供选择, but there are many more models available, 51 00:02:19,290 --> 00:02:20,523 且不仅仅是英语。 and not just in English. 52 00:02:21,450 --> 00:02:23,670 让我们回到文本生成 pipeline Let's go back to the text generation pipeline 53 00:02:23,670 --> 00:02:26,193 并用另一个模型 distilgpt2 加载它。 and load it with another model, distilgpt2. 54 00:02:27,060 --> 00:02:28,950 这是 gpt2 的轻量级版本 This is a lighter version of gpt2 55 00:02:28,950 --> 00:02:30,603 由 Hugging Face 团队创建。 created by the Hugging Face team. 56 00:02:31,740 --> 00:02:34,110 将 pipeline 应用于给定提示时, When applying the pipeline to a given prompt, 57 00:02:34,110 --> 00:02:36,360 我们可以指定几个参数 we can specify several arguments 58 00:02:36,360 --> 00:02:39,240 例如生成文本的最大长度, such as the maximum length of the generated texts, 59 00:02:39,240 --> 00:02:41,700 或者我们想要返回的句子数量, or the number of sentences we want to return, 60 00:02:41,700 --> 00:02:44,150 因为这一代有一些随机性。 since there is some randomness in the generation. 61 00:02:46,080 --> 00:02:48,750 通过猜测句子中的下一个单词来生成文本 Generating texts by guessing the next word in a sentence 62 00:02:48,750 --> 00:02:51,450 是 GPT-2 的预训练目标。 was the pretraining objective of GPT-2. 63 00:02:51,450 --> 00:02:55,140 掩码填充 pipeline 是 BERT 的预训练目标, The fill mask pipeline is the pretraining objective of BERT, 64 00:02:55,140 --> 00:02:57,363 这是猜测掩码词的值。 which is to guess the value of masked word. 65 00:02:58,260 --> 00:03:01,020 在这种情况下,我们询问两个最可能的值 In this case, we ask the two most likely values 66 00:03:01,020 --> 00:03:03,660 对于缺失的词,根据模型, for the missing words, according to the model, 67 00:03:03,660 --> 00:03:07,053 并通过数学计算推测可能的答案。 and get mathematical or computational as possible answers. 68 00:03:08,280 --> 00:03:10,170 Transformers 模型可以执行的另一项任务 Another task Transformers model can perform 69 00:03:10,170 --> 00:03:12,660 就是对句子中的每一个词进行分类 is to classify each word in the sentence 70 00:03:12,660 --> 00:03:14,970 而不是整个句子。 instead of the sentence as a whole. 71 00:03:14,970 --> 00:03:18,390 其中一个例子是命名实体识别, One example of this is Named Entity Recognition, 72 00:03:18,390 --> 00:03:20,820 这是识别实体的任务, which is the task of identifying entities, 73 00:03:20,820 --> 00:03:25,323 例如句子中的人、组织或地点。 such as persons, organizations or locations in a sentence. 
74 00:03:26,400 --> 00:03:30,570 在这里,模型正确地找到了人 Sylvain, Here, the model correctly finds the person, Sylvain, 75 00:03:30,570 --> 00:03:32,453 组织,是 Hugging Face the organization, Hugging Face, 76 00:03:32,453 --> 00:03:35,010 以及位置,布鲁克林, as well as the location, Brooklyn, 77 00:03:35,010 --> 00:03:36,303 在输入文本中。 inside the input text. 78 00:03:37,661 --> 00:03:40,230 使用的 grouped_entities=True 参数 The grouped_entities=True argument used 79 00:03:40,230 --> 00:03:42,330 就是把 pipeline 组合在一起 is to make the pipeline group together 80 00:03:42,330 --> 00:03:44,790 链接到同一实体的不同词, the different words linked to the same entity, 81 00:03:44,790 --> 00:03:46,353 比如这里的 Hugging 和 Face。 such as Hugging and Face here. 82 00:03:48,270 --> 00:03:50,670 pipeline API 可用的另一个任务 Another task available with the pipeline API 83 00:03:50,670 --> 00:03:52,920 是抽取式问答。 is extractive question answering. 84 00:03:52,920 --> 00:03:55,380 提供上下文和问题, Providing a context and a question, 85 00:03:55,380 --> 00:03:58,290 该模型将识别上下文中的文本范围 the model will identify the span of text in the context 86 00:03:58,290 --> 00:04:00,190 包含问题的答案。 containing the answer to the question. 87 00:04:01,650 --> 00:04:03,960 获取长文的简短摘要 Getting short summaries of very long articles 88 00:04:03,960 --> 00:04:06,540 也是 Transformers 库可以提供的功能, is also something the Transformers library can help with, 89 00:04:06,540 --> 00:04:08,140 这是摘要 pipeline 提供的功能。 with the summarization pipeline. 90 00:04:09,480 --> 00:04:12,570 pipeline API 支持的最后一个任务 Finally, the last task supported by the pipeline API 91 00:04:12,570 --> 00:04:14,130 是翻译。 is translation. 92 00:04:14,130 --> 00:04:16,170 这里我们使用在模型中心 (Model Hub) 提供的 Here we use a French/English model 93 00:04:16,170 --> 00:04:17,460 法语/英语模型 found on the model hub 94 00:04:17,460 --> 00:04:19,893 获取我们输入文本的英文版本。 to get the English version of our input text. 95 00:04:21,600 --> 00:04:23,490 这是所有任务的简要总结 Here is a brief summary of all the tasks 96 00:04:23,490 --> 00:04:25,500 我们在这段视频中进行了调查。 we've looked into in this video. 97 00:04:25,500 --> 00:04:27,390 然后通过模型中心中提供的 Try then out through the inference widgets 98 00:04:27,390 --> 00:04:28,327 推理小部件进行尝试。 in the model hub. 99 00:04:30,459 --> 00:04:33,475 (屏幕嗖嗖声) (screen whooshes) 100 00:04:33,475 --> 00:04:35,175 (徽标嗖嗖声) (logo whooshes)
course/subtitles/zh-CN/01_the-pipeline-function.srt/0
{ "file_path": "course/subtitles/zh-CN/01_the-pipeline-function.srt", "repo_id": "course", "token_count": 5350 }
162
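The bilingual subtitles above tour the pipeline function across tasks (sentiment analysis, zero-shot classification, text generation with distilgpt2, fill-mask, NER, question answering, summarization, translation). The snippet below exercises a few of them; where no model is given, the pipeline downloads its default checkpoint, and the prompts are illustrative rather than taken from the video.

from transformers import pipeline

classifier = pipeline("sentiment-analysis")
print(classifier(["This course is great.", "I hate this so much!"]))   # one result per input, processed as a batch

zero_shot = pipeline("zero-shot-classification")
print(zero_shot("This is a course about the Transformers library",
                candidate_labels=["education", "politics", "business"]))

generator = pipeline("text-generation", model="distilgpt2")            # swapping in a lighter model, as in the video
print(generator("In this course, we will teach you how to", max_length=30, num_return_sequences=2))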
1 00:00:00,373 --> 00:00:02,956 (微妙的爆炸) (subtle blast) 2 00:00:05,400 --> 00:00:07,590 - 如何一起批量输入。 - How to batch inputs together. 3 00:00:07,590 --> 00:00:09,240 在本视频中,我们将看到如何 In this video, we will see how 4 00:00:09,240 --> 00:00:11,073 将输入序列一起批处理。 to batch input sequences together. 5 00:00:12,137 --> 00:00:15,420 一般来说,我们想要输入模型的句子 In general, the sentences we want to pass through our model 6 00:00:15,420 --> 00:00:17,670 不会都有相同的长度。 won't all have the same lengths. 7 00:00:17,670 --> 00:00:19,740 在这里,我们使用模型 Here, we are using the model we saw 8 00:00:19,740 --> 00:00:22,080 在情绪分析 pipeline 中讲的 in the sentiment analysis pipeline 9 00:00:22,080 --> 00:00:24,063 并想对两个句子进行分类。 and want to classify two sentences. 10 00:00:24,900 --> 00:00:27,360 将它们分词化并映射每个分词时 *[译者注: token, tokenization, tokenizer 等词均译成了 分词*, 实则不翻译最佳] When tokenizing them and mapping each token 11 00:00:27,360 --> 00:00:29,610 到其相应的输入 ID, to its corresponding input IDs, 12 00:00:29,610 --> 00:00:31,593 我们得到两个不同长度的列表。 we get two lists of different lengths. 13 00:00:33,240 --> 00:00:35,340 尝试创建 tensor 或 NumPy 数组 Trying to create a tensor or a NumPy array 14 00:00:35,340 --> 00:00:38,220 从这两个列表中, 将导致错误, from those two lists will result in an error, 15 00:00:38,220 --> 00:00:41,043 因为所有数组和张量都应该是矩形的。 because all arrays and tensors should be rectangular. 16 00:00:42,240 --> 00:00:44,160 突破此限制的一种方法 One way to overcome this limit 17 00:00:44,160 --> 00:00:45,690 是让第二句 is to make the second sentence 18 00:00:45,690 --> 00:00:47,640 与第一个长度相同 the same length as the first 19 00:00:47,640 --> 00:00:50,463 通过根据需要多次添加特殊分词。 by adding a special token as many times as necessary. 20 00:00:51,600 --> 00:00:53,970 另一种方法是截断第一个序列 Another way would be to truncate the first sequence 21 00:00:53,970 --> 00:00:55,710 到第二个的长度, to the length of the second, 22 00:00:55,710 --> 00:00:58,140 但我们会失去很多信息 but we would then lose a lot of information 23 00:00:58,140 --> 00:01:01,083 而这可能是正确分类句子所必需的。 that might be necessary to properly classify the sentence. 24 00:01:02,190 --> 00:01:04,830 一般来说,我们只截断句子 In general, we only truncate sentences 25 00:01:04,830 --> 00:01:06,840 当它们长于最大长度时 when they are longer than the maximum length 26 00:01:06,840 --> 00:01:08,073 该模型可以处理。 the model can handle. 27 00:01:09,720 --> 00:01:11,850 用于填充第二句的值 The value used to pad the second sentence 28 00:01:11,850 --> 00:01:13,740 不应被随意挑选; should not be picked randomly; 29 00:01:13,740 --> 00:01:16,680 该模型已经用特定的填充 ID 进行了预训练, the model has been pretrained with a certain padding ID, 30 00:01:16,680 --> 00:01:19,533 你可以在 tokenizer.pad_token_id 中找到它。 which you can find in tokenizer.pad_token_id. 31 00:01:21,090 --> 00:01:22,800 现在我们已经填充了句子, Now that we have padded our sentences, 32 00:01:22,800 --> 00:01:24,303 我们可以和他们做成一批。 we can make a batch with them. 33 00:01:25,380 --> 00:01:28,320 如果我们分别将两个句子传递给模型 If we pass the two sentences to the model separately 34 00:01:28,320 --> 00:01:30,120 和并批在一起,然而 and batched together however, 35 00:01:30,120 --> 00:01:32,100 我们注意到我们没有得到相同的结果 we notice that we don't get the same results 36 00:01:32,100 --> 00:01:34,060 对于被填充的句子, for the sentence that is padded, 37 00:01:34,060 --> 00:01:35,403 在这里,第二个。 here, the second one. 38 00:01:36,390 --> 00:01:39,420 是 Transformers 有问题?不。 It's at the backend in the Transformers Library? No. 
39 00:01:39,420 --> 00:01:40,770 如果你还记得 Transformer 模型 If you remember that Transformer models 40 00:01:40,770 --> 00:01:42,810 大量使用注意力层, make heavy use of attention layers, 41 00:01:42,810 --> 00:01:45,210 这应该不足为奇; this should not come as a total surprise; 42 00:01:45,210 --> 00:01:48,277 在计算每个分词的上下文表示时, when computing the contextual representation of each token, 43 00:01:48,277 --> 00:01:50,910 注意层查看所有其他词 the attention layers look at all the other words 44 00:01:50,910 --> 00:01:52,410 在句子中。 in the sentence. 45 00:01:52,410 --> 00:01:53,850 如果我们只有这句话 If we have just the sentence 46 00:01:53,850 --> 00:01:56,970 或者添加了几个填充 token 的句子, or the sentence with several padding tokens added, 47 00:01:56,970 --> 00:01:59,073 我们没有得到相同的值是合乎逻辑的。 it's logical we don't get the same values. 48 00:02:00,270 --> 00:02:03,030 要在有或没有填充的情况下获得相同的结果, To get the same results with or without padding, 49 00:02:03,030 --> 00:02:05,340 我们需要向注意力层表明 we need to indicate to the attention layers 50 00:02:05,340 --> 00:02:08,070 他们应该忽略那些填充 token 。 that they should ignore those padding tokens. 51 00:02:08,070 --> 00:02:10,620 这是通过创建一个注意力掩码来完成的, This is done by creating an attention mask, 52 00:02:10,620 --> 00:02:13,320 与输入 ID 具有相同形状的张量, a tensor with the same shape as the input IDs, 53 00:02:13,320 --> 00:02:14,733 用 0 和 1 。 with zeros and ones. 54 00:02:15,780 --> 00:02:18,120 1 的分词表示注意层 Ones indicate the tokens the attention layers 55 00:02:18,120 --> 00:02:20,100 应该结合上下文考虑 should consider in the context 56 00:02:20,100 --> 00:02:22,100 并且 0 的分词他们应该忽略。 and zeros the tokens they should ignore. 57 00:02:23,520 --> 00:02:26,760 现在,将这个注意掩码与输入 ID 一起传入 Now, passing this attention mask along with the input ID 58 00:02:26,760 --> 00:02:28,170 会给我们相同的结果 will give us the same results 59 00:02:28,170 --> 00:02:31,170 就像我们将两个句子单独发送给模型一样。 as when we sent the two sentences individually to the model. 60 00:02:32,400 --> 00:02:34,950 这一切都是由分词器在幕后完成的 This is all done behind the scenes by the tokenizer 61 00:02:34,950 --> 00:02:36,900 当你将它应用于几个句子时 when you apply it to several sentences 62 00:02:36,900 --> 00:02:38,613 设参数 padding=True。 with the flag padding=True. 63 00:02:39,599 --> 00:02:41,490 它将应用具有适当值的填充 It will apply the padding with the proper value 64 00:02:41,490 --> 00:02:43,140 对较小的句子 to the smaller sentences 65 00:02:43,140 --> 00:02:45,423 并创建适当的注意掩码。 and create the appropriate attention mask. 66 00:02:46,993 --> 00:02:49,576 (微妙的爆炸) (subtle blast)
course/subtitles/zh-CN/17_batching-inputs-together-(pytorch).srt/0
{ "file_path": "course/subtitles/zh-CN/17_batching-inputs-together-(pytorch).srt", "repo_id": "course", "token_count": 3541 }
163
1 00:00:00,321 --> 00:00:01,497 (空气呼啸) (air whooshing) 2 00:00:01,497 --> 00:00:02,330 (笑脸弹出) (smiley face popping) 3 00:00:02,330 --> 00:00:05,130 (空气呼啸) (air whooshing) 4 00:00:05,130 --> 00:00:06,830 - [Instructor] push_to_hub API。 - [Instructor] So push_to_hub API. 5 00:00:08,310 --> 00:00:10,533 让我们看一下 push_to_hub API。 Let's have a look at the push_to_hub API. 6 00:00:11,730 --> 00:00:14,640 你需要使用你的 Hugging Face 帐户登录 You will need to be logged in with your Hugging Face account 7 00:00:14,640 --> 00:00:17,400 你可以通过执行第一个单元格里的操作来登录, which you can do by executing this first cell, 8 00:00:17,400 --> 00:00:21,123 或者在 terminal 中输入 huggingface-cli login。 or by typing huggingface-cli login in a terminal. 9 00:00:21,990 --> 00:00:26,640 只需输入你的用户名和密码,然后点击登录, Just enter you username and password, then click login, 10 00:00:26,640 --> 00:00:28,620 登录后将存储一个授权 token this will store a authentication token 11 00:00:28,620 --> 00:00:30,670 到你正在使用的机器的缓存中。 in the cache of the machine you're using. 12 00:00:31,890 --> 00:00:35,790 现在,让我们基于 GLUE COLA 数据集 Now, let's launch a fine tuning of a BERT model 13 00:00:35,790 --> 00:00:37,920 对 BERT 模型进行微调。 on the GLUE COLA dataset. 14 00:00:37,920 --> 00:00:39,600 我们不会深入探讨微调代码 We won't go over the fine tuning code 15 00:00:39,600 --> 00:00:42,270 你可以在任何 transformer 教程 because you can find it in any transformer tutorial, 16 00:00:42,270 --> 00:00:44,670 或查看下面的视频链接找到相关参考。 or by looking at the videos link below. 17 00:00:44,670 --> 00:00:46,470 我们感兴趣的是 What interests us here is 18 00:00:46,470 --> 00:00:48,970 如何在训练期间利用 model hub。 how we can leverage the model hub during training. 19 00:00:49,860 --> 00:00:52,980 这是通过 “push_to_hub=true” 参数完成的 This is done with the "push_to_hub=true" argument 20 00:00:52,980 --> 00:00:55,530 将该参数添加到你的 TrainingArguments。 passed in your TrainingArguments. 21 00:00:55,530 --> 00:00:57,240 每次保存时将自动上传 This will automatically upload your model 22 00:00:57,240 --> 00:00:59,400 你的模型到 Hub,在我们的示例中, to the Hub each time it is saved, 23 00:00:59,400 --> 00:01:01,323 每个 epoch 都会如此操作。 so every epoch in our case. 24 00:01:02,280 --> 00:01:04,860 如果当前的被打断 This allows you to resume training from a different machine 25 00:01:04,860 --> 00:01:06,873 这允许你从不同的机器恢复训练之前的训练。 if the current one gets interrupted. 26 00:01:08,220 --> 00:01:10,440 该模型将使用 The model will be updated in your namespace 27 00:01:10,440 --> 00:01:14,640 你默认选择的输出目录的名称在你的 namespace 中更新。 with the name of the output directory you picked by default. 28 00:01:14,640 --> 00:01:16,020 你可以通过将其 You can choose another name 29 00:01:16,020 --> 00:01:19,113 传递给 hub_model_id 参数选择其他名称。 by passing it to the hub_model_id argument. 30 00:01:20,070 --> 00:01:23,370 你还可以通过传递完整的仓库名称 You can also push inside an organization you are a member of 31 00:01:23,370 --> 00:01:25,740 将模型 push 到你所属的组织内部, by passing a full repository name, 32 00:01:25,740 --> 00:01:28,933 使用 organization/ 的形式, with the name of the organization/, 33 00:01:28,933 --> 00:01:30,433 再加上你所选的 model ID。 the model ID you want to pick. 34 00:01:32,250 --> 00:01:34,650 完成后,我们就可以开始训练了, With that done, we can just launch training, 35 00:01:34,650 --> 00:01:36,093 稍等一下。 and wait a little bit. 36 00:01:36,960 --> 00:01:39,033 视频中会跳过等待的过程。 I'll cut the waiting time from the video. 37 00:01:43,260 --> 00:01:46,350 请注意,模型是异步 push 的, Note that the model is pushed asynchronously, 38 00:01:46,350 --> 00:01:47,730 意味着当你的模型上传到 hub 时, meaning that the training continues 39 00:01:47,730 --> 00:01:49,730 训练将继续进行。 while your model is uploaded to the hub. 
40 00:01:51,060 --> 00:01:52,950 当你的第一次 commit 完成时, When your first commit is finished, 41 00:01:52,950 --> 00:01:55,650 你可以通过查看你的 namespace you can go inspect your model on the Hub 42 00:01:55,650 --> 00:01:57,960 去 Hub 检查你的模型, by looking inside your namespace, 43 00:01:57,960 --> 00:01:59,943 你会在最上面找到它。 and you'll find it at the very top. 44 00:02:01,980 --> 00:02:04,200 你甚至可以在继续训练的同时 You can even start playing with its inference widget 45 00:02:04,200 --> 00:02:06,630 开始使用它的 inference 小部件。 while it's continuing the training. 46 00:02:06,630 --> 00:02:09,270 Cola 数据集让模型确定 The Cola dataset tasks the model with determining 47 00:02:09,270 --> 00:02:11,970 句子在语法上是否是正确的。 if the sentence is grammatically correct on that. 48 00:02:11,970 --> 00:02:15,510 所以我们选择一个错误句子的例子来测试它。 So we pick an example of incorrect sentence to test it. 49 00:02:15,510 --> 00:02:16,950 请注意,第一次尝试使用它时, Note that it'll take a bit of time 50 00:02:16,950 --> 00:02:18,750 这需要一些时间才能在 inference API 中 to load your model inside the inference APIs, 51 00:02:18,750 --> 00:02:20,880 完成模型加载。 so first time you try to use it. 52 00:02:20,880 --> 00:02:23,280 我们将根据时间从视频中删掉。 We'll cut by time from the video. 53 00:02:23,280 --> 00:02:24,870 标签有点问题, There is something wrong with the labels, 54 00:02:24,870 --> 00:02:27,360 我们稍后会在本视频中修复它。 but we'll fix it later in this video. 55 00:02:27,360 --> 00:02:29,520 一旦你的训练完成, Once your training is finished, 56 00:02:29,520 --> 00:02:31,770 你应该使用 trainer.push_to_hub 方法 you should do one last push with the trainer 57 00:02:31,770 --> 00:02:33,840 最后再提交一次。 that pushed to a method. 58 00:02:33,840 --> 00:02:35,430 这其中有两个原因。 This is for two reason. 59 00:02:35,430 --> 00:02:36,750 首先,若你尚未完成 First, this will make sure 60 00:02:36,750 --> 00:02:39,180 这将确保你正在预测模型的 you are predicting the final version of your model 61 00:02:39,180 --> 00:02:40,680 最终版本。 if you didn't already. 62 00:02:40,680 --> 00:02:42,480 例如,如果你曾经是在每一步保存 For instance, if you used to save 63 00:02:42,480 --> 00:02:46,980 而不是每秒保存, every in step strategy instead of every second, 64 00:02:46,980 --> 00:02:48,180 这将创建一个 model card this will draft a model card 65 00:02:48,180 --> 00:02:51,120 那将是你的 model repo 的最初始页面。 that will be the landing page of your model repo. 66 00:02:51,120 --> 00:02:52,260 commit 完成后, Once the commit is done, 67 00:02:52,260 --> 00:02:54,810 让我们回到我们的 model 页面并刷新。 let's go back on our model page and refresh. 68 00:02:54,810 --> 00:02:56,820 我们可以看到 model card 的草稿 We can see the drafters model card 69 00:02:56,820 --> 00:02:58,080 其中包括信息, which includes information, 70 00:02:58,080 --> 00:03:00,381 以及哪个模型被调整过。 and which one model we find tuned. 71 00:03:00,381 --> 00:03:03,570 接下来是最终的评估 loss 和 metric, So final evaluation loss and metric, 72 00:03:03,570 --> 00:03:06,300 使用过的训练超参数, the training hyperparameter used, 73 00:03:06,300 --> 00:03:08,670 中间训练结果, the intermediate training results, 74 00:03:08,670 --> 00:03:10,320 以及我们使用的框架版本 and the framework versions we used 75 00:03:10,320 --> 00:03:13,173 以便其他人可以轻松地重现我们的结果。 so that other people can easily reproduce our results. 76 00:03:15,270 --> 00:03:16,860 在所有这些信息之上, On top of all that information, 77 00:03:16,860 --> 00:03:19,740 trainer 还包括一些 metadata,它可以 the trainer also included some metadata that is interpreted 78 00:03:19,740 --> 00:03:22,650 通过 model cloud 上的 HuggingFace 网站解析。 by the Hugging Face website in the model cloud. 
79 00:03:22,650 --> 00:03:26,010 你将会得到一个漂亮的 widget 所返回的相关指标数值 You get the value of the metrics reported in a nice widget 80 00:03:26,010 --> 00:03:29,640 以及一个链接指向 leaderboard(Paper with Code)。 as well as a link to a leaderboard with paper with code. 81 00:03:29,640 --> 00:03:32,550 并且 Tensorboard 的运行结果也包含 So the Tensorboard runs have also been pushed 82 00:03:32,550 --> 00:03:34,560 在这份报告中,我们可以在 Model Hub 中 to this report, and we can look at them 83 00:03:34,560 --> 00:03:36,000 通过点击子菜单中的 directly from the model hub 84 00:03:36,000 --> 00:03:38,850 Training metrics 查看报告。 by clicking on the training metrics sub menu. 85 00:03:38,850 --> 00:03:39,795 如果你不使用 Trainer API If you are not using the Trainer API 86 00:03:39,795 --> 00:03:42,510 微调你的模型, to fine-tune your model, 87 00:03:42,510 --> 00:03:43,770 你可以在模型上直接 you can use a push_to_hub method 88 00:03:43,770 --> 00:03:46,427 使用 push_to_hub 方法和分词器。 on the model, and tokenizer directly. 89 00:03:46,427 --> 00:03:50,160 让我们测试一下以修复 inference widget 中的所有标签。 Let's test this to fix all labels in the inference widget. 90 00:03:50,160 --> 00:03:52,740 inference widget 使用不同的标签名称 The inference widget was using different names for labels 91 00:03:52,740 --> 00:03:54,810 因为我们没有在整数和标签名称之间 because we did not indicate the correspondence 92 00:03:54,810 --> 00:03:57,030 注明关联性。 between integer and label names. 93 00:03:57,030 --> 00:03:58,740 当推送模型配置到 hub 时, We can fix this in the configuration 94 00:03:58,740 --> 00:04:01,350 我们可以通过将 label2id by setting the label2id, 95 00:04:01,350 --> 00:04:04,170 和 id2label 字段设置为合适的值 and id2label fields through the proper values 96 00:04:04,170 --> 00:04:06,933 在配置中解决这个问题。 when pushing the model config to the hub. 97 00:04:07,950 --> 00:04:10,620 完成后,我们可以在网站上查看, Once this is done, we can check on the website, 98 00:04:10,620 --> 00:04:13,380 模型现在显示正确的标签。 and the model is now showing the proper label. 99 00:04:13,380 --> 00:04:15,240 现在模型在 hub 上, Now that the model is on the hub, 100 00:04:15,240 --> 00:04:17,370 我们可以在任何地方使用它 we can use it from anywhere 101 00:04:17,370 --> 00:04:19,920 就像我们对任何其他 Transformer 模型 as we would any other Transformer model 102 00:04:19,920 --> 00:04:21,113 使用 from_pretrained 方法 with the from_pretrained method 103 00:04:21,113 --> 00:04:22,923 或者使用 pipeline 函数。 or with the pipeline function. 104 00:04:34,350 --> 00:04:36,780 我们只需要使用 hub 的标识符, We just have to use the identifier from the hub, 105 00:04:36,780 --> 00:04:39,450 我们可以看到模型配置和权重 and we can see that the model configuration and weights 106 00:04:39,450 --> 00:04:42,483 以及分词处理后的文件会自动下载。 as well as the tokenized files are automatically downloaded. 107 00:04:53,880 --> 00:04:55,950 在下一次训练中尝试 push_to_hub API Try the push_to_hub API in the next training 108 00:04:55,950 --> 00:04:58,650 轻松与世界其他地方分享你的模型。 to easily share your model with the rest of the world. 109 00:05:01,151 --> 00:05:03,818 (空气呼啸) (air whooshing)
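To make the workflow in the transcript concrete, here is a schematic sketch of the two pushing paths it mentions. It is not runnable as-is: it assumes a `model`, `tokenizer`, and tokenized CoLA datasets already exist from the fine-tuning steps, and the repository names are placeholders.

```python
from transformers import Trainer, TrainingArguments

# Path 1: let the Trainer push checkpoints to the Hub during training.
args = TrainingArguments(
    "bert-fine-tuned-cola",   # output directory; also the default Hub repo name
    save_strategy="epoch",
    push_to_hub=True,         # upload to the Hub every time a checkpoint is saved
    # hub_model_id="my-org/bert-fine-tuned-cola",  # optional: push under an organization
)
trainer = Trainer(
    model=model,                                   # assumed to exist
    args=args,
    train_dataset=tokenized_datasets["train"],      # assumed to exist
    eval_dataset=tokenized_datasets["validation"],  # assumed to exist
    tokenizer=tokenizer,                            # assumed to exist
)
trainer.train()
trainer.push_to_hub()  # final push: latest weights plus a drafted model card

# Path 2: push the model and tokenizer directly, for example after fixing the
# label names so the inference widget shows them properly
# (CoLA convention assumed here: 0 = unacceptable, 1 = acceptable).
model.config.id2label = {0: "unacceptable", 1: "acceptable"}
model.config.label2id = {"unacceptable": 0, "acceptable": 1}
model.push_to_hub("bert-fine-tuned-cola")
tokenizer.push_to_hub("bert-fine-tuned-cola")
```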
course/subtitles/zh-CN/33_the-push-to-hub-api-(pytorch).srt/0
{ "file_path": "course/subtitles/zh-CN/33_the-push-to-hub-api-(pytorch).srt", "repo_id": "course", "token_count": 5818 }
164
1 00:00:00,286 --> 00:00:02,869 (微妙的爆炸) (subtle blast) 2 00:00:04,694 --> 00:00:07,380 - 在这个视频中,我们将一起看到 - In this video, we will see together 3 00:00:07,380 --> 00:00:09,930 什么是规范化组件 what is the normalizer component 4 00:00:09,930 --> 00:00:13,023 我们会在每个 tokenizer 的开头找到它。 that we'd find at the beginning of each tokenizer. 5 00:00:14,550 --> 00:00:16,830 规范化操作包括 The normalization operation consists 6 00:00:16,830 --> 00:00:19,890 在应用一系列规范化规则 in applying a succession of normalization rules 7 00:00:19,890 --> 00:00:20,853 到原始文本时。 to the raw text. 8 00:00:21,870 --> 00:00:25,710 我们选择规范化规则来去除文本中的噪音 We choose normalization rules to remove noise in the text 9 00:00:25,710 --> 00:00:27,900 这似乎对学习没有用 which seem useless for the learning 10 00:00:27,900 --> 00:00:30,363 以及使用我们的语言模型。 and use of our language model. 11 00:00:33,090 --> 00:00:37,470 让我们来看一个非常多样化的句子,用不同的字体 Let's take a very diverse sentence with different fonts, 12 00:00:37,470 --> 00:00:39,780 大写和小写的字符, upper and lower case characters, 13 00:00:39,780 --> 00:00:43,083 重音符号、标点符号和多个空格, accents, punctuation and multiple spaces, 14 00:00:43,920 --> 00:00:46,683 看看几个 tokenizer 是如何规范化它的。 to see how several tokenizer normalize it. 15 00:00:48,488 --> 00:00:50,730 来自 FNet 模型的 tokenizer The tokenizer from the FNet model 16 00:00:50,730 --> 00:00:53,700 用字体变体改变了字母 has transformed the letter with font variants 17 00:00:53,700 --> 00:00:57,480 或圈入他们的基本版本 or circled into their basic version 18 00:00:57,480 --> 00:00:59,733 并删除了多个空格。 and has removed the multiple spaces. 19 00:01:00,960 --> 00:01:03,960 现在,如果我们看一下规范化 And now if we look at the normalization 20 00:01:03,960 --> 00:01:05,880 使用 Retribert 的分词器, with Retribert's tokenizer, 21 00:01:05,880 --> 00:01:08,010 我们可以看到它保留了字符 we can see that it keeps characters 22 00:01:08,010 --> 00:01:12,090 具有多种字体变体并保留多个空格, with several font variants and keeps the multiple spaces, 23 00:01:12,090 --> 00:01:14,223 但它删除了所有的重音。 but it removes all the accents. 24 00:01:16,170 --> 00:01:18,870 如果我们继续测试这种标准化 And if we continue to test this normalization 25 00:01:18,870 --> 00:01:23,040 与模型相关的许多其他 tokenizer of many other tokenizers associated to models 26 00:01:23,040 --> 00:01:25,110 我们可以在 Hub 上找到, that we can find on the Hub, 27 00:01:25,110 --> 00:01:28,833 我们看到他们还提出了其他类型的正常化。 we see that they also propose other kind of normalization. 28 00:01:33,900 --> 00:01:35,850 使用快速 tokenizer , With the fast tokenizers, 29 00:01:35,850 --> 00:01:39,060 很容易观察到选择的规范化 it's very easy to observe the normalization chosen 30 00:01:39,060 --> 00:01:41,193 对于当前加载的分词器。 for the currently loaded tokenizer. 31 00:01:42,330 --> 00:01:46,140 事实上,每个快速 tokenizer 的实例 Indeed, each instance of a fast tokenizer 32 00:01:46,140 --> 00:01:48,030 有一个底层 tokenizer has an underlying tokenizer 33 00:01:48,030 --> 00:01:51,390 来自存储的 HuggingFace Tokenizers 库 from the HuggingFace Tokenizers library stored 34 00:01:51,390 --> 00:01:53,643 在 backend_tokenizer 属性中。 in the backend_tokenizer attribute. 35 00:01:54,690 --> 00:01:58,470 这个对象本身有一个规范器属性 This object has itself a normalizer attribute 36 00:01:58,470 --> 00:02:01,830 我们可以使用, 多亏 normalize_str 方法 that we can use thanks to the normalize_str method 37 00:02:01,830 --> 00:02:03,153 以规范化一个字符串。 to normalize a string. 
38 00:02:04,560 --> 00:02:08,700 因此,这种标准化非常实用, It is thus very practical that this normalization, 39 00:02:08,700 --> 00:02:11,070 使用在训练时 which was used at the time of the training 40 00:02:11,070 --> 00:02:12,903 保存分词器, of the tokenizer was saved, 41 00:02:13,857 --> 00:02:16,200 并且它会自动应用 and that it applies automatically 42 00:02:16,200 --> 00:02:19,233 当你要求训练过的 tokenizer 对文本进行分词时。 when you ask a trained tokenizer to tokenize a text. 43 00:02:21,000 --> 00:02:25,500 例如,如果我们没有包含 albert 标准化器, For example, if we hadn't included the albert normalizer, 44 00:02:25,500 --> 00:02:28,770 我们会有很多未知的 token we would have had a lot of unknown tokens 45 00:02:28,770 --> 00:02:30,930 通过标记这个句子 by tokenizing this sentence 46 00:02:30,930 --> 00:02:33,213 带有重音符号和大写字母。 with accents and capital letters. 47 00:02:35,730 --> 00:02:38,370 这种转变也可能是无法检测的 This transformation can also be undetectable 48 00:02:38,370 --> 00:02:40,050 通过简单的打印出来。 with a simple print. 49 00:02:40,050 --> 00:02:42,810 确实,请记住,对于计算机来说, Indeed, keep in mind that for a computer, 50 00:02:42,810 --> 00:02:45,840 文本只是连续的 0 和 1, text is only a succession of 0 and 1, 51 00:02:45,840 --> 00:02:47,820 碰巧是不同的承接 and it happens that different successions 52 00:02:47,820 --> 00:02:51,363 让 0 和 1 呈现相同的打印字符。 of 0 and 1 render the same printed character. 53 00:02:52,380 --> 00:02:56,403 0 和 1 以 8 个为一组组成一个字节。 The 0 and 1 go in group of 8 to form a byte. 54 00:02:57,480 --> 00:03:00,690 然后计算机必须解码这个字节序列 The computer must then decode this sequence of bytes 55 00:03:00,690 --> 00:03:02,493 成一系列代码点。 into a sequence of code points. 56 00:03:04,530 --> 00:03:09,530 在我们的示例中,2 个字节通过 UTF-8 解码 In our example, the 2 bytes is decoded using UTF-8 57 00:03:09,900 --> 00:03:11,403 成一个单一的代码点。 into a single code point. 58 00:03:12,450 --> 00:03:15,090 然后 unicode 标准允许我们 The unicode standard then allows us 59 00:03:15,090 --> 00:03:18,191 找到与此代码点对应的字符, to find the character corresponding to this code point, 60 00:03:18,191 --> 00:03:20,283 c 音符。 the c cedilla. 61 00:03:21,499 --> 00:03:23,790 让我们重复同样的操作 Let's repeat the same operation 62 00:03:23,790 --> 00:03:26,577 有了这个由 3 个字节组成的新序列, with this new sequence composed of 3 bytes,. 63 00:03:27,420 --> 00:03:30,543 这次转化为两个码点, This time it is transformed into two code points, 64 00:03:31,410 --> 00:03:35,280 这也对应于 c cedilla 字符。 which also correspond to the c cedilla character. 65 00:03:35,280 --> 00:03:36,780 它实际上是组成 It is in fact the composition 66 00:03:36,780 --> 00:03:39,810 unicode 拉丁文小写字母 C of the unicode Latin Small Letter C 67 00:03:39,810 --> 00:03:42,240 和组合的音符。 and the combining cedilla. 68 00:03:42,240 --> 00:03:45,000 但这很烦人,因为在我们看来 But it's annoying because what appears to us 69 00:03:45,000 --> 00:03:46,680 为了成为一个单一的字符 to be a single character 70 00:03:46,680 --> 00:03:49,653 对于计算机来说完全不是一回事。 is not at all the same thing for the computer. 71 00:03:52,470 --> 00:03:57,240 幸好有 unicode 标准化标准 Fortunately, there are unicode standardization standards 72 00:03:57,240 --> 00:04:02,130 称为 NFC、NFD、NFKC 或 NFKD known as NFC, NFD, NFKC or NFKD 73 00:04:02,130 --> 00:04:04,893 这允许消除其中的一些差异。 that allow erasing some of these differences. 74 00:04:05,730 --> 00:04:08,223 这些标准通常由 tokenizer 使用。 These standards are often used by tokenizers. 
75 00:04:09,900 --> 00:04:12,090 在所有这些前面的例子中, On all these previous examples, 76 00:04:12,090 --> 00:04:15,510 即使规范化改变了文本的外观, even if the normalizations changed the look of the text, 77 00:04:15,510 --> 00:04:17,970 他们没有改变内容; they did not change the content; 78 00:04:17,970 --> 00:04:19,177 你仍然可以阅读, you could still read, 79 00:04:19,177 --> 00:04:21,987 “Hello world,让我们规范一下这句话。” "Hello world, let's normalize this sentence." 80 00:04:22,980 --> 00:04:25,980 但是,你必须知道一些规范化 However, you must be aware that some normalizations 81 00:04:25,980 --> 00:04:30,363 如果他们不适配他们的语料库,可能会非常有害。 can be very harmful if they are not adapted to their corpus. 82 00:04:31,620 --> 00:04:34,387 例如,如果你使用法语句子, For example, if you take the French sentence, 83 00:04:34,387 --> 00:04:38,790 “Un pere indigne”,意思是 “愤怒的父亲”, "Un pere indigne," which means "An indignant father," 84 00:04:38,790 --> 00:04:42,510 并使用 bert-base-uncase tokenizer 对其进行规范化 and normalize it with the bert-base-uncase tokenizer 85 00:04:42,510 --> 00:04:44,313 这消除了口音, which removes the accent, 86 00:04:45,150 --> 00:04:48,000 然后句子变成 “Un pere indigne” then the sentence becomes "Un pere indigne" 87 00:04:48,000 --> 00:04:49,707 意思是 “一个不称职的父亲”。 which means "An unworthy father". 88 00:04:53,460 --> 00:04:56,760 如果你观看此视频是为了构建自己的 tokenizer , If you watched this video to build your own tokenizer, 89 00:04:56,760 --> 00:04:59,610 没有绝对的规则选择与否 there are no absolute rules to choose or not 90 00:04:59,610 --> 00:05:02,970 一个新 tokenizer 的规范化, a normalization for a new tokenizer, 91 00:05:02,970 --> 00:05:06,210 但我建议你花时间选择它们 but I advise you to take the time to select them 92 00:05:06,210 --> 00:05:10,743 这样它们就不会让你丢失重要信息。 so that they do not make you lose important information. 93 00:05:12,296 --> 00:05:14,879 (微妙的爆炸) (subtle blast)
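A small sketch of the `backend_tokenizer.normalizer` inspection described above; the checkpoints are examples and the outputs noted in the comments are typical but may vary slightly across tokenizer versions.

```python
import unicodedata
from transformers import AutoTokenizer

# A fast tokenizer exposes the normalizer of the underlying tokenizers library.
tokenizer = AutoTokenizer.from_pretrained("albert-base-v1")
print(type(tokenizer.backend_tokenizer.normalizer))
print(tokenizer.backend_tokenizer.normalizer.normalize_str("Héllò hôw are ü?"))
# albert lowercases and strips accents, so this typically prints "hello how are u?"

# Accent stripping can change meaning, as in the French example from the transcript.
bert_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
print(bert_tokenizer.backend_tokenizer.normalizer.normalize_str("Un père indigné"))
# typically prints "un pere indigne", which reads as "an unworthy father"

# The "c cedilla" byte-level discussion corresponds to unicode normalization forms:
single = "\u00e7"        # ç as a single code point
combined = "c\u0327"     # c + combining cedilla, two code points
print(single == combined)                                # False
print(unicodedata.normalize("NFC", combined) == single)  # True
```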
course/subtitles/zh-CN/49_what-is-normalization.srt/0
{ "file_path": "course/subtitles/zh-CN/49_what-is-normalization.srt", "repo_id": "course", "token_count": 5043 }
165
1 00:00:05,580 --> 00:00:07,177 - 让我们研究如何针对问答场景 - Let's study how to preprocess a dataset 2 00:00:07,177 --> 00:00:08,643 预处理数据集。 for question answering. 3 00:00:10,200 --> 00:00:11,640 问答场景是在一些上下文环境下 Question answering is a task 4 00:00:11,640 --> 00:00:14,343 针对某个问题寻找其答案的应用场景。 of finding answers to a question in some context. 5 00:00:15,270 --> 00:00:17,550 例如,我们将使用 SQuAD 数据集 For example, we'll use the SQuAD dataset 6 00:00:17,550 --> 00:00:19,860 在其中我们删除了我们不会使用的列 in which we remove columns we won't use 7 00:00:19,860 --> 00:00:21,660 并针对标签提取 and just extract the information we will need 8 00:00:21,660 --> 00:00:22,950 我们需要的信息, for the labels, 9 00:00:22,950 --> 00:00:26,370 即上下文中答案的开始和结束。 the start and the end of the answer in the context. 10 00:00:26,370 --> 00:00:28,690 如果你有自己的问答数据集, If you have your own dataset for question answering, 11 00:00:28,690 --> 00:00:31,680 只要确保你清理数据以达到同一效果, just make sure you clean your data to get to the same point, 12 00:00:31,680 --> 00:00:33,900 其中一列包含问题, with one column containing the questions, 13 00:00:33,900 --> 00:00:35,940 一列包含上下文, one column containing the context, 14 00:00:35,940 --> 00:00:38,610 一列为上下文中的答案的 one column for the index of the start and end character 15 00:00:38,610 --> 00:00:40,473 起始和终止字符的索引。 of the answer in the context. 16 00:00:41,610 --> 00:00:44,520 请注意,答案必须是上下文的一部分。 Note that the answer must be part of the context. 17 00:00:44,520 --> 00:00:47,160 如果你想进行生成式问答, If you want to perform generative question answering, 18 00:00:47,160 --> 00:00:50,160 查看下面链接的视频序列之一。 look at one of the sequence to sequence videos linked below. 19 00:00:51,600 --> 00:00:53,430 现在,如果我们看一下 Now, if we have a look at the tokens 20 00:00:53,430 --> 00:00:54,750 我们将输入给我们的模型的词元, we will feed our model, 21 00:00:54,750 --> 00:00:58,320 我们会看到答案就在上下文中的某个地方。 we'll see the answer lies somewhere inside the context. 22 00:00:58,320 --> 00:01:01,080 对于很长的上下文,该答案可能 For very long context, that answer may get truncated 23 00:01:01,080 --> 00:01:02,580 会被分词器截断。 by the tokenizer. 24 00:01:02,580 --> 00:01:05,970 在这种情况下,我们的模型将没有任何合适的标签, In this case, we won't have any proper labels for our model, 25 00:01:05,970 --> 00:01:07,680 所以我们应该 so we should keep the truncated part 26 00:01:07,680 --> 00:01:10,203 保留截断的部分作为一个单独的功能而不是丢弃它。 as a separate feature instead of discarding it. 27 00:01:11,100 --> 00:01:12,990 我们唯一需要小心的是 The only thing we need to be careful with 28 00:01:12,990 --> 00:01:15,660 是允许单独的块之间有一些重叠 is to allow some overlap between separate chunks 29 00:01:15,660 --> 00:01:17,670 这样答案就不会被截断 so that the answer is not truncated 30 00:01:17,670 --> 00:01:19,920 使得包含答案的特征 and that the feature containing the answer 31 00:01:19,920 --> 00:01:22,623 获得足够的上下文来预测它。 gets sufficient context to be able to predict it. 32 00:01:23,490 --> 00:01:26,040 这是分词器如何完成的。 Here is how it can be done by the tokenizer. 33 00:01:26,040 --> 00:01:29,370 我们将问题、上下文传递给它, We pass it the question, context, set a truncation 34 00:01:29,370 --> 00:01:33,240 仅针对上下文设置截断,并且填充到最大长度。 for the context only, and the padding to the maximum length. 35 00:01:33,240 --> 00:01:35,340 stride 参数是我们针对重叠的词元 The stride argument is where we set the number 36 00:01:35,340 --> 00:01:36,900 设置数字的地方, of overlapping tokens, 37 00:01:36,900 --> 00:01:39,600 并且return_overflowing_tokens 等于 true and the return overflowing tokens equals true 38 00:01:39,600 --> 00:01:42,630 意味着我们不想丢弃截断的部分。 means we don't want to discard the truncated part. 
39 00:01:42,630 --> 00:01:45,210 最后,我们还返回 return_offsets_mapping Lastly, we also return the offset mappings 40 00:01:45,210 --> 00:01:47,220 从而能够找到和答案的起始与结束相关的 to be able to find the tokens corresponding 41 00:01:47,220 --> 00:01:48,693 相应的词元。 to the answer start and end. 42 00:01:49,860 --> 00:01:52,290 我们想要保留这些词元,因为它们将成为标签 We want those tokens because they will be the labels 43 00:01:52,290 --> 00:01:53,970 我们将其传递给我们的模型。 we pass through our model. 44 00:01:53,970 --> 00:01:56,870 在独热编码版本中,这是它们的样子。 In a one-hot encoded version, here is what they look like. 45 00:01:57,930 --> 00:02:00,480 如果我们的上下文不包含答案, If the context we have does not contain the answer, 46 00:02:00,480 --> 00:02:03,799 我们将这两个标签设置为 CLS 词元的索引。 we set the two labels to the index of the CLS token. 47 00:02:03,799 --> 00:02:05,700 如果上下文仅部分包含答案, We also do this if the context 48 00:02:05,700 --> 00:02:07,713 我们也会这样做。 only partially contains the answer. 49 00:02:08,580 --> 00:02:11,400 在代码方面,这是我们如何做到的。 In terms of code, here is how we can do it. 50 00:02:11,400 --> 00:02:13,710 使用输入的序列 ID, Using the sequence IDs of an input, 51 00:02:13,710 --> 00:02:17,220 我们可以确定上下文的开始和结束。 we can determine the beginning and the end of the context. 52 00:02:17,220 --> 00:02:19,800 然后,我们就知道是否需要 Then, we know if we have to return to the CLS position 53 00:02:19,800 --> 00:02:22,290 根据两个标签返回 CLS 位置 for the two labels or we determine the position 54 00:02:22,290 --> 00:02:25,050 或者我们确定答案的第一个和最后一个词元的位置。 of the first and last tokens of the answer. 55 00:02:25,050 --> 00:02:27,800 我们可以在前面的示例中检查它是否正常发挥作用。 We can check it works properly on our previous example. 56 00:02:28,680 --> 00:02:31,380 把它们放在一起看起来就像这个大函数, Putting it all together looks like this big function, 57 00:02:31,380 --> 00:02:34,233 我们可以使用 map 方法将其应用于我们的数据集。 which we can apply to our datasets with the map method. 58 00:02:35,310 --> 00:02:37,920 由于我们在词元化过程中应用了填充操作, Since we applied padding during the tokenization, 59 00:02:37,920 --> 00:02:40,680 然后我们可以直接作为训练器使用 we can then use this directly as the trainer 60 00:02:40,680 --> 00:02:44,133 或者应用 to_tf_dataset 方法来使用 Keras.fit。 or apply the to_tf_dataset method to use Keras.fit.
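For reference, a minimal sketch of the tokenizer call described in the transcript; the checkpoint, lengths, and example texts are assumptions chosen so that the context actually gets split into overlapping chunks.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # assumed checkpoint

question = "Which deep learning libraries back Transformers?"
context = (
    "Transformers is backed by the three most popular deep learning libraries "
    "- Jax, PyTorch and TensorFlow - with a seamless integration between them."
)

inputs = tokenizer(
    question,
    context,
    max_length=48,                   # deliberately small so the context is split
    truncation="only_second",        # only the context may be truncated
    stride=16,                       # overlapping tokens between consecutive chunks
    return_overflowing_tokens=True,  # keep truncated parts as extra features
    return_offsets_mapping=True,     # character offsets to locate the answer tokens
    padding="max_length",
)

print(len(inputs["input_ids"]))  # number of features generated from one example
# sequence_ids distinguishes question tokens (0), context tokens (1), special tokens (None)
print(inputs.sequence_ids(0))
```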
course/subtitles/zh-CN/65_data-processing-for-question-answering.srt/0
{ "file_path": "course/subtitles/zh-CN/65_data-processing-for-question-answering.srt", "repo_id": "course", "token_count": 3506 }
166
import pandas as pd from youtube_transcript_api import YouTubeTranscriptApi from youtube_transcript_api.formatters import SRTFormatter from youtubesearchpython import Playlist from pathlib import Path import argparse COURSE_VIDEOS_PLAYLIST = "https://youtube.com/playlist?list=PLo2EIpI_JMQvWfQndUesu0nPBAtZ9gP1o" TASK_VIDEOS_PLAYLIST = "https://youtube.com/playlist?list=PLo2EIpI_JMQtyEr-sLJSy5_SnLCb4vtQf" # These videos are not part of the course, but are part of the task playlist TASK_VIDEOS_TO_SKIP = ["tjAIM7BOYhw", "WdAeKSOpxhw", "KWwzcmG98Ds", "TksaY_FDgnk", "leNG9fN9FQU", "dKE8SIt9C-w"] def generate_subtitles(language: str, youtube_language_code: str = None, is_task_playlist: bool = False): metadata = [] formatter = SRTFormatter() path = Path(f"subtitles/{language}") path.mkdir(parents=True, exist_ok=True) if is_task_playlist: playlist_videos = Playlist.getVideos(TASK_VIDEOS_PLAYLIST) else: playlist_videos = Playlist.getVideos(COURSE_VIDEOS_PLAYLIST) for idx, video in enumerate(playlist_videos["videos"]): video_id = video["id"] title = video["title"] title_formatted = title.lower().replace(" ", "-").replace(":", "").replace("?", "") id_str = f"{idx}".zfill(2) if is_task_playlist: srt_filename = f"{path}/tasks_{id_str}_{title_formatted}.srt" else: srt_filename = f"{path}/{id_str}_{title_formatted}.srt" # Skip course events if "Event Day" in title: continue # Skip task videos that don't belong to the course if video_id in TASK_VIDEOS_TO_SKIP: continue # Get transcript transcript_list = YouTubeTranscriptApi.list_transcripts(video_id) english_transcript = transcript_list.find_transcript(language_codes=["en", "en-US"]) languages = pd.DataFrame(english_transcript.translation_languages)["language_code"].tolist() # Map mismatched language codes if language not in languages: if youtube_language_code is None: raise ValueError( f"Language code {language} not found in YouTube's list of supported language: {languages}. Please provide a value for `youtube_language_code` and try again." ) language_code = youtube_language_code else: language_code = language try: translated_transcript = english_transcript.translate(language_code) translated_transcript = translated_transcript.fetch() srt_formatted = formatter.format_transcript(translated_transcript) with open(srt_filename, "w", encoding="utf-8") as f: f.write(srt_formatted) except: print(f"Problem generating transcript for {title} with ID {video_id} at {video['link']}.") with open(srt_filename, "w", encoding="utf-8") as f: f.write("No transcript found for this video!") metadata.append({"id": video_id, "title": title, "link": video["link"], "srt_filename": srt_filename}) df = pd.DataFrame(metadata) if is_task_playlist: df.to_csv(f"{path}/metadata_tasks.csv", index=False) else: df.to_csv(f"{path}/metadata.csv", index=False) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--language", type=str, help="Language to generate subtitles for") parser.add_argument("--youtube_language_code", type=str, help="YouTube language code") args = parser.parse_args() generate_subtitles(args.language, args.youtube_language_code, is_task_playlist=False) generate_subtitles(args.language, args.youtube_language_code, is_task_playlist=True) print(f"All done! Subtitles stored at subtitles/{args.language}")
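For context, here is a hypothetical invocation of the helpers defined above, equivalent to running the script from the command line. The `zh-Hans` YouTube language code is an assumption and should be checked against the codes YouTube actually reports for translated transcripts.

```python
# Equivalent to: python generate_subtitles.py --language zh-CN --youtube_language_code zh-Hans
# Uses generate_subtitles() from the script above; writes .srt files plus a metadata CSV
# under subtitles/zh-CN/ for both the course and the task playlists.
generate_subtitles("zh-CN", youtube_language_code="zh-Hans", is_task_playlist=False)
generate_subtitles("zh-CN", youtube_language_code="zh-Hans", is_task_playlist=True)
```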
course/utils/generate_subtitles.py/0
{ "file_path": "course/utils/generate_subtitles.py", "repo_id": "course", "token_count": 1604 }
167
repos: - repo: https://github.com/charliermarsh/ruff-pre-commit # https://github.com/charliermarsh/ruff#usage rev: 'v0.3.0' hooks: # Run the linter. - id: ruff args: [ --fix ] # Run the formatter. - id: ruff-format
datasets/.pre-commit-config.yaml/0
{ "file_path": "datasets/.pre-commit-config.yaml", "repo_id": "datasets", "token_count": 122 }
168
import json import os import tempfile import transformers import datasets from utils import generate_example_dataset, get_duration SPEED_TEST_N_EXAMPLES = 500_000 RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__) RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json")) @get_duration def map(dataset: datasets.Dataset, **kwargs): _ = dataset.map(**kwargs) @get_duration def filter(dataset: datasets.Dataset, **kwargs): _ = dataset.filter(**kwargs) def benchmark_map_filter(): times = {"num examples": SPEED_TEST_N_EXAMPLES} with tempfile.TemporaryDirectory() as tmp_dir: features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")}) dataset = generate_example_dataset( os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES ) tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True) def tokenize(examples): return tokenizer(examples["text"]) times["map identity"] = map(dataset) times["map identity batched"] = map(dataset, batched=True) times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="numpy"): times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="pandas"): times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="torch", columns="numbers"): times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True) with dataset.formatted_as(type="tensorflow", columns="numbers"): times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True) times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True) times["filter"] = filter(dataset) # Activate later when tokenizer support batched inputs # with dataset.formatted_as(type='numpy'): # times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True) with open(RESULTS_FILE_PATH, "wb") as f: f.write(json.dumps(times).encode("utf-8")) if __name__ == "__main__": # useful to run the profiler benchmark_map_filter()
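The `get_duration` decorator is imported from a local `utils` module that is not shown here. Purely as an illustration of what such a timing helper might look like (an assumption, not the actual implementation), a minimal sketch:

```python
import functools
import timeit

def get_duration(func):
    """Hypothetical sketch: run `func` once and return the elapsed wall-clock time in seconds."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = timeit.default_timer()
        func(*args, **kwargs)
        return timeit.default_timer() - start
    return wrapper
```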
datasets/benchmarks/benchmark_map_filter.py/0
{ "file_path": "datasets/benchmarks/benchmark_map_filter.py", "repo_id": "datasets", "token_count": 996 }
169
# Build and load Nearly every deep learning workflow begins with loading a dataset, which makes it one of the most important steps. With 🤗 Datasets, there are more than 900 datasets available to help you get started with your NLP task. All you have to do is call: [`load_dataset`] to take your first step. This function is a true workhorse in every sense because it builds and loads every dataset you use. ## ELI5: `load_dataset` Let's begin with a basic Explain Like I'm Five. A dataset is a directory that contains: - Some data files in generic formats (JSON, CSV, Parquet, text, etc.) - A dataset card named `README.md` that contains documentation about the dataset as well as a YAML header to define the datasets tags and configurations - An optional dataset script if it requires some code to read the data files. This is sometimes used to load files of specific formats and structures. The [`load_dataset`] function fetches the requested dataset locally or from the Hugging Face Hub. The Hub is a central repository where all the Hugging Face datasets and models are stored. If the dataset only contains data files, then [`load_dataset`] automatically infers how to load the data files from their extensions (json, csv, parquet, txt, etc.). Under the hood, 🤗 Datasets will use an appropriate [`DatasetBuilder`] based on the data files format. There exist one builder per data file format in 🤗 Datasets: * [`datasets.packaged_modules.text.Text`] for text * [`datasets.packaged_modules.csv.Csv`] for CSV and TSV * [`datasets.packaged_modules.json.Json`] for JSON and JSONL * [`datasets.packaged_modules.parquet.Parquet`] for Parquet * [`datasets.packaged_modules.arrow.Arrow`] for Arrow (streaming file format) * [`datasets.packaged_modules.sql.Sql`] for SQL databases * [`datasets.packaged_modules.imagefolder.ImageFolder`] for image folders * [`datasets.packaged_modules.audiofolder.AudioFolder`] for audio folders If the dataset has a dataset script, then it downloads and imports it from the Hugging Face Hub. Code in the dataset script defines a custom [`DatasetBuilder`] the dataset information (description, features, URL to the original files, etc.), and tells 🤗 Datasets how to generate and display examples from it. <Tip> Read the [Share](./upload_dataset) section to learn more about how to share a dataset. This section also provides a step-by-step guide on how to write your own dataset loading script! </Tip> 🤗 Datasets downloads the dataset files from the original URL, generates the dataset and caches it in an Arrow table on your drive. If you've downloaded the dataset before, then 🤗 Datasets will reload it from the cache to save you the trouble of downloading it again. Now that you have a high-level understanding about how datasets are built, let's take a closer look at the nuts and bolts of how all this works. ## Building a dataset When you load a dataset for the first time, 🤗 Datasets takes the raw data file and builds it into a table of rows and typed columns. There are two main classes responsible for building a dataset: [`BuilderConfig`] and [`DatasetBuilder`]. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/builderconfig.png"/> </div> ### BuilderConfig[[datasets-builderconfig]] [`BuilderConfig`] is the configuration class of [`DatasetBuilder`]. 
The [`BuilderConfig`] contains the following basic attributes about a dataset: | Attribute | Description | |---------------|--------------------------------------------------------------| | `name` | Short name of the dataset. | | `version` | Dataset version identifier. | | `data_dir` | Stores the path to a local folder containing the data files. | | `data_files` | Stores paths to local data files. | | `description` | Description of the dataset. | If you want to add additional attributes to your dataset such as the class labels, you can subclass the base [`BuilderConfig`] class. There are two ways to populate the attributes of a [`BuilderConfig`] class or subclass: - Provide a list of predefined [`BuilderConfig`] class (or subclass) instances in the datasets [`DatasetBuilder.BUILDER_CONFIGS`] attribute. - When you call [`load_dataset`], any keyword arguments that are not specific to the method will be used to set the associated attributes of the [`BuilderConfig`] class. This will override the predefined attributes if a specific configuration was selected. You can also set the [`DatasetBuilder.BUILDER_CONFIG_CLASS`] to any custom subclass of [`BuilderConfig`]. ### DatasetBuilder[[datasets-datasetbuilder]] [`DatasetBuilder`] accesses all the attributes inside [`BuilderConfig`] to build the actual dataset. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/datasetbuilder.png"/> </div> There are three main methods in [`DatasetBuilder`]: 1. [`DatasetBuilder._info`] is in charge of defining the dataset attributes. When you call `dataset.info`, 🤗 Datasets returns the information stored here. Likewise, the [`Features`] are also specified here. Remember, the [`Features`] are like the skeleton of the dataset. It provides the names and types of each column. 2. [`DatasetBuilder._split_generator`] downloads or retrieves the requested data files, organizes them into splits, and defines specific arguments for the generation process. This method has a [`DownloadManager`] that downloads files or fetches them from your local filesystem. Within the [`DownloadManager`], there is a [`DownloadManager.download_and_extract`] method that accepts a dictionary of URLs to the original data files, and downloads the requested files. Accepted inputs include: a single URL or path, or a list/dictionary of URLs or paths. Any compressed file types like TAR, GZIP and ZIP archives will be automatically extracted. Once the files are downloaded, [`SplitGenerator`] organizes them into splits. The [`SplitGenerator`] contains the name of the split, and any keyword arguments that are provided to the [`DatasetBuilder._generate_examples`] method. The keyword arguments can be specific to each split, and typically comprise at least the local path to the data files for each split. 3. [`DatasetBuilder._generate_examples`] reads and parses the data files for a split. Then it yields dataset examples according to the format specified in the `features` from [`DatasetBuilder._info`]. The input of [`DatasetBuilder._generate_examples`] is actually the `filepath` provided in the keyword arguments of the last method. The dataset is generated with a Python generator, which doesn't load all the data in memory. As a result, the generator can handle large datasets. However, before the generated samples are flushed to the dataset file on disk, they are stored in an `ArrowWriter` buffer. This means the generated samples are written by batch. 
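To make the three methods above concrete, here is a minimal, purely illustrative sketch of a loading script. The class name, URL, and file layout are invented placeholders, not a real dataset; note that the method implemented on `GeneratorBasedBuilder` subclasses is spelled `_split_generators`.

```python
import csv
import datasets

class MySketchDataset(datasets.GeneratorBasedBuilder):
    """Illustrative only: a tiny builder wiring together _info, _split_generators and _generate_examples."""

    def _info(self):
        return datasets.DatasetInfo(
            description="A hypothetical dataset of labelled sentences.",
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.ClassLabel(names=["neg", "pos"]),
                }
            ),
        )

    def _split_generators(self, dl_manager):
        # dl_manager downloads (and extracts) the archive, returning a local cached path.
        archive = dl_manager.download_and_extract("https://example.com/data.zip")  # placeholder URL
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": f"{archive}/train.csv"},
            ),
        ]

    def _generate_examples(self, filepath):
        # Yield (key, example) pairs matching the features declared in _info.
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(csv.DictReader(f)):
                yield idx, {"text": row["text"], "label": row["label"]}
```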
If your dataset samples consume a lot of memory (images or videos), then make sure to specify a low value for the `DEFAULT_WRITER_BATCH_SIZE` attribute in [`DatasetBuilder`]. We recommend not exceeding a size of 200 MB.

## Maintaining integrity

To ensure a dataset is complete, [`load_dataset`] will perform a series of tests on the downloaded files to make sure everything is there. This way, you don't encounter any surprises when your requested dataset doesn't get generated as expected.

[`load_dataset`] verifies:

- The number of splits in the generated `DatasetDict`.
- The number of samples in each split of the generated `DatasetDict`.
- The list of downloaded files.
- The SHA256 checksums of the downloaded files (disabled by default).

If the dataset doesn't pass the verifications, it is likely that the original host of the dataset made some changes in the data files.

<Tip>

If it is your own dataset, you'll need to recompute the information above and update the `README.md` file in your dataset repository. Take a look at this [section](dataset_script#optional-generate-dataset-metadata) to learn how to generate and update this metadata.

</Tip>

In this case, an error is raised to alert that the dataset has changed. To ignore the error, one needs to specify `verification_mode="no_checks"` in [`load_dataset`]. Anytime you see a verification error, feel free to open a discussion or pull request in the corresponding dataset "Community" tab, so that the integrity checks for that dataset are updated.

## Security

The dataset repositories on the Hub are scanned for malware; see more information [here](https://huggingface.co/docs/hub/security#malware-scanning). Moreover, the datasets without a namespace (originally contributed on our GitHub repository) have all been reviewed by our maintainers. The code of these datasets is considered **safe**. It concerns datasets that are not under a namespace, e.g. "squad" or "glue", unlike the other datasets that are named "username/dataset_name" or "org/dataset_name".
datasets/docs/source/about_dataset_load.mdx/0
{ "file_path": "datasets/docs/source/about_dataset_load.mdx", "repo_id": "datasets", "token_count": 2537 }
170
# Cloud storage 🤗 Datasets supports access to cloud storage providers through a `fsspec` FileSystem implementations. You can save and load datasets from any cloud storage in a Pythonic way. Take a look at the following table for some example of supported cloud storage providers: | Storage provider | Filesystem implementation | |----------------------|---------------------------------------------------------------| | Amazon S3 | [s3fs](https://s3fs.readthedocs.io/en/latest/) | | Google Cloud Storage | [gcsfs](https://gcsfs.readthedocs.io/en/latest/) | | Azure Blob/DataLake | [adlfs](https://github.com/fsspec/adlfs) | | Dropbox | [dropboxdrivefs](https://github.com/MarineChap/dropboxdrivefs)| | Google Drive | [gdrivefs](https://github.com/intake/gdrivefs) | | Oracle Cloud Storage | [ocifs](https://ocifs.readthedocs.io/en/latest/) | This guide will show you how to save and load datasets with any cloud storage. Here are examples for S3, Google Cloud Storage, Azure Blob Storage, and Oracle Cloud Object Storage. ## Set up your cloud storage FileSystem ### Amazon S3 1. Install the S3 FileSystem implementation: ``` >>> pip install s3fs ``` 2. Define your credentials To use an anonymous connection, use `anon=True`. Otherwise, include your `aws_access_key_id` and `aws_secret_access_key` whenever you are interacting with a private S3 bucket. ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"key": aws_access_key_id, "secret": aws_secret_access_key} # for private buckets # or use a botocore session >>> import aiobotocore.session >>> s3_session = aiobotocore.session.AioSession(profile="my_profile_name") >>> storage_options = {"session": s3_session} ``` 3. Create your FileSystem instance ```py >>> import s3fs >>> fs = s3fs.S3FileSystem(**storage_options) ``` ### Google Cloud Storage 1. Install the Google Cloud Storage implementation: ``` >>> conda install -c conda-forge gcsfs # or install with pip >>> pip install gcsfs ``` 2. Define your credentials ```py >>> storage_options={"token": "anon"} # for anonymous connection # or use your credentials of your default gcloud credentials or from the google metadata service >>> storage_options={"project": "my-google-project"} # or use your credentials from elsewhere, see the documentation at https://gcsfs.readthedocs.io/ >>> storage_options={"project": "my-google-project", "token": TOKEN} ``` 3. Create your FileSystem instance ```py >>> import gcsfs >>> fs = gcsfs.GCSFileSystem(**storage_options) ``` ### Azure Blob Storage 1. Install the Azure Blob Storage implementation: ``` >>> conda install -c conda-forge adlfs # or install with pip >>> pip install adlfs ``` 2. Define your credentials ```py >>> storage_options = {"anon": True} # for anonymous connection # or use your credentials >>> storage_options = {"account_name": ACCOUNT_NAME, "account_key": ACCOUNT_KEY} # gen 2 filesystem # or use your credentials with the gen 1 filesystem >>> storage_options={"tenant_id": TENANT_ID, "client_id": CLIENT_ID, "client_secret": CLIENT_SECRET} ``` 3. Create your FileSystem instance ```py >>> import adlfs >>> fs = adlfs.AzureBlobFileSystem(**storage_options) ``` ### Oracle Cloud Object Storage 1. Install the OCI FileSystem implementation: ``` >>> pip install ocifs ``` 2. Define your credentials ```py >>> storage_options = {"config": "~/.oci/config", "region": "us-ashburn-1"} ``` 3. 
Create your FileSystem instance ```py >>> import ocifs >>> fs = ocifs.OCIFileSystem(**storage_options) ``` ## Load and Save your datasets using your cloud storage FileSystem ### Download and prepare a dataset into a cloud storage You can download and prepare a dataset into your cloud storage by specifying a remote `output_dir` in `download_and_prepare`. Don't forget to use the previously defined `storage_options` containing your credentials to write into a private cloud storage. The `download_and_prepare` method works in two steps: 1. it first downloads the raw data files (if any) in your local cache. You can set your cache directory by passing `cache_dir` to [`load_dataset_builder`] 2. then it generates the dataset in Arrow or Parquet format in your cloud storage by iterating over the raw data files. Load a dataset builder from the Hugging Face Hub (see [how to load from the Hugging Face Hub](./loading#hugging-face-hub)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("imdb") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Load a dataset builder using a loading script (see [how to load a local loading script](./loading#local-loading-script)): ```py >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("path/to/local/loading_script/loading_script.py") >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` Use your own data files (see [how to load local and remote files](./loading#local-and-remote-files)): ```py >>> data_files = {"train": ["path/to/train.csv"]} >>> output_dir = "s3://my-bucket/imdb" >>> builder = load_dataset_builder("csv", data_files=data_files) >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet") ``` It is highly recommended to save the files as compressed Parquet files to optimize I/O by specifying `file_format="parquet"`. Otherwise the dataset is saved as an uncompressed Arrow file. You can also specify the size of the shards using `max_shard_size` (default is 500MB): ```py >>> builder.download_and_prepare(output_dir, storage_options=storage_options, file_format="parquet", max_shard_size="1GB") ``` #### Dask Dask is a parallel computing library and it has a pandas-like API for working with larger than memory Parquet datasets in parallel. Dask can use multiple threads or processes on a single machine, or a cluster of machines to process data in parallel. Dask supports local data but also data from a cloud storage. Therefore you can load a dataset saved as sharded Parquet files in Dask with ```py import dask.dataframe as dd df = dd.read_parquet(output_dir, storage_options=storage_options) # or if your dataset is split into train/valid/test df_train = dd.read_parquet(output_dir + f"/{builder.name}-train-*.parquet", storage_options=storage_options) df_valid = dd.read_parquet(output_dir + f"/{builder.name}-validation-*.parquet", storage_options=storage_options) df_test = dd.read_parquet(output_dir + f"/{builder.name}-test-*.parquet", storage_options=storage_options) ``` You can find more about dask dataframes in their [documentation](https://docs.dask.org/en/stable/dataframe.html). 
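Besides Dask, the exported Parquet shards can also be read back with 🤗 Datasets itself. This is a sketch assuming a recent release where `load_dataset` accepts fsspec paths in `data_files` together with `storage_options`; the bucket path mirrors the `output_dir` used above.

```python
from datasets import load_dataset

# Reload the train shards written by download_and_prepare above
# (credentials come from the storage_options defined earlier).
ds = load_dataset(
    "parquet",
    data_files={"train": "s3://my-bucket/imdb/*train*.parquet"},
    storage_options=storage_options,
    split="train",
)
print(ds)
```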
## Saving serialized datasets After you have processed your dataset, you can save it to your cloud storage with [`Dataset.save_to_disk`]: ```py # saves encoded_dataset to amazon s3 >>> encoded_dataset.save_to_disk("s3://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to google cloud storage >>> encoded_dataset.save_to_disk("gcs://my-private-datasets/imdb/train", storage_options=storage_options) # saves encoded_dataset to microsoft azure blob/datalake >>> encoded_dataset.save_to_disk("adl://my-private-datasets/imdb/train", storage_options=storage_options) ``` <Tip> Remember to define your credentials in your [FileSystem instance](#set-up-your-cloud-storage-filesystem) `fs` whenever you are interacting with a private cloud storage. </Tip> ## Listing serialized datasets List files from a cloud storage with your FileSystem instance `fs`, using `fs.ls`: ```py >>> fs.ls("my-private-datasets/imdb/train", detail=False) ["dataset_info.json.json","dataset.arrow","state.json"] ``` ### Load serialized datasets When you are ready to use your dataset again, reload it with [`Dataset.load_from_disk`]: ```py >>> from datasets import load_from_disk # load encoded_dataset from cloud storage >>> dataset = load_from_disk("s3://a-public-datasets/imdb/train", storage_options=storage_options) >>> print(len(dataset)) 25000 ```
datasets/docs/source/filesystems.mdx/0
{ "file_path": "datasets/docs/source/filesystems.mdx", "repo_id": "datasets", "token_count": 2640 }
171
# Object detection Object detection models identify something in an image, and object detection datasets are used for applications such as autonomous driving and detecting natural hazards like wildfire. This guide will show you how to apply transformations to an object detection dataset following the [tutorial](https://albumentations.ai/docs/examples/example_bboxes/) from [Albumentations](https://albumentations.ai/docs/). To run these examples, make sure you have up-to-date versions of `albumentations` and `cv2` installed: ``` pip install -U albumentations opencv-python ``` In this example, you'll use the [`cppe-5`](https://huggingface.co/datasets/cppe-5) dataset for identifying medical personal protective equipment (PPE) in the context of the COVID-19 pandemic. Load the dataset and take a look at an example: ```py >>> from datasets import load_dataset >>> ds = load_dataset("cppe-5") >>> example = ds['train'][0] >>> example {'height': 663, 'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=943x663 at 0x7FC3DC756250>, 'image_id': 15, 'objects': {'area': [3796, 1596, 152768, 81002], 'bbox': [[302.0, 109.0, 73.0, 52.0], [810.0, 100.0, 57.0, 28.0], [160.0, 31.0, 248.0, 616.0], [741.0, 68.0, 202.0, 401.0]], 'category': [4, 4, 0, 0], 'id': [114, 115, 116, 117]}, 'width': 943} ``` The dataset has the following fields: - `image`: PIL.Image.Image object containing the image. - `image_id`: The image ID. - `height`: The image height. - `width`: The image width. - `objects`: A dictionary containing bounding box metadata for the objects in the image: - `id`: The annotation id. - `area`: The area of the bounding box. - `bbox`: The object's bounding box (in the [coco](https://albumentations.ai/docs/getting_started/bounding_boxes_augmentation/#coco) format). - `category`: The object's category, with possible values including `Coverall (0)`, `Face_Shield (1)`, `Gloves (2)`, `Goggles (3)` and `Mask (4)`. You can visualize the `bboxes` on the image using some internal torch utilities. To do that, you will need to reference the [`~datasets.ClassLabel`] feature associated with the category IDs so you can look up the string labels: ```py >>> import torch >>> from torchvision.ops import box_convert >>> from torchvision.utils import draw_bounding_boxes >>> from torchvision.transforms.functional import pil_to_tensor, to_pil_image >>> categories = ds['train'].features['objects'].feature['category'] >>> boxes_xywh = torch.tensor(example['objects']['bbox']) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in example['objects']['category']] >>> to_pil_image( ... draw_bounding_boxes( ... pil_to_tensor(example['image']), ... boxes_xyxy, ... colors="red", ... labels=labels, ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example.png"> </div> With `albumentations`, you can apply transforms that will affect the image while also updating the `bboxes` accordingly. In this case, the image is resized to (480, 480), flipped horizontally, and brightened. ```py >>> import albumentations >>> import numpy as np >>> transform = albumentations.Compose([ ... albumentations.Resize(480, 480), ... albumentations.HorizontalFlip(p=1.0), ... albumentations.RandomBrightnessContrast(p=1.0), ... ], bbox_params=albumentations.BboxParams(format='coco', label_fields=['category'])) >>> image = np.array(example['image']) >>> out = transform( ... image=image, ... 
bboxes=example['objects']['bbox'], ... category=example['objects']['category'], ... ) ``` Now when you visualize the result, the image should be flipped, but the `bboxes` should still be in the right places. ```py >>> image = torch.tensor(out['image']).permute(2, 0, 1) >>> boxes_xywh = torch.stack([torch.tensor(x) for x in out['bboxes']]) >>> boxes_xyxy = box_convert(boxes_xywh, 'xywh', 'xyxy') >>> labels = [categories.int2str(x) for x in out['category']] >>> to_pil_image( ... draw_bounding_boxes( ... image, ... boxes_xyxy, ... colors='red', ... labels=labels ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed.png"> </div> Create a function to apply the transform to a batch of examples: ```py >>> def transforms(examples): ... images, bboxes, categories = [], [], [] ... for image, objects in zip(examples['image'], examples['objects']): ... image = np.array(image.convert("RGB")) ... out = transform( ... image=image, ... bboxes=objects['bbox'], ... category=objects['category'] ... ) ... images.append(torch.tensor(out['image']).permute(2, 0, 1)) ... bboxes.append(torch.tensor(out['bboxes'])) ... categories.append(out['category']) ... return {'image': images, 'bbox': bboxes, 'category': categories} ``` Use the [`~Dataset.set_transform`] function to apply the transform on-the-fly which consumes less disk space. The randomness of data augmentation may return a different image if you access the same example twice. It is especially useful when training a model for several epochs. ```py >>> ds['train'].set_transform(transforms) ``` You can verify the transform works by visualizing the 10th example: ```py >>> example = ds['train'][10] >>> to_pil_image( ... draw_bounding_boxes( ... example['image'], ... box_convert(example['bbox'], 'xywh', 'xyxy'), ... colors='red', ... labels=[categories.int2str(x) for x in example['category']] ... ) ... ) ``` <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/visualize_detection_example_transformed_2.png"> </div> <Tip> Now that you know how to process a dataset for object detection, learn [how to train an object detection model](https://colab.research.google.com/github/NielsRogge/Transformers-Tutorials/blob/master/YOLOS/Fine_tuning_YOLOS_for_object_detection_on_custom_dataset_(balloon).ipynb) and use it for inference. </Tip>
datasets/docs/source/object_detection.mdx/0
{ "file_path": "datasets/docs/source/object_detection.mdx", "repo_id": "datasets", "token_count": 2299 }
172
# Share a dataset to the Hub The [Hub](https://huggingface.co/datasets) is home to an extensive collection of community-curated and popular research datasets. We encourage you to share your dataset to the Hub to help grow the ML community and accelerate progress for everyone. All contributions are welcome; adding a dataset is just a drag and drop away! Start by [creating a Hugging Face Hub account](https://huggingface.co/join) if you don't have one yet. ## Upload with the Hub UI The Hub's web-based interface allows users without any developer experience to upload a dataset. ### Create a repository A repository hosts all your dataset files, including the revision history, making storing more than one dataset version possible. 1. Click on your profile and select **New Dataset** to create a new dataset repository. 2. Pick a name for your dataset, and choose whether it is a public or private dataset. A public dataset is visible to anyone, whereas a private dataset can only be viewed by you or members of your organization. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/create_repo.png"/> </div> ### Upload dataset 1. Once you've created a repository, navigate to the **Files and versions** tab to add a file. Select **Add file** to upload your dataset files. We support many text, audio, and image data extensions such as `.csv`, `.mp3`, and `.jpg` among many others. For text data extensions like `.csv`, `.json`, `.jsonl`, and `.txt`, we recommend compressing them before uploading to the Hub (to `.zip` or `.gz` file extension for example). Text file extensions are not tracked by Git LFS by default, and if they're greater than 10MB, they will not be committed and uploaded. Take a look at the `.gitattributes` file in your repository for a complete list of tracked file extensions. For this tutorial, you can use the following sample `.csv` files since they're small: <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/train.csv" download>train.csv</a>, <a href="https://huggingface.co/datasets/stevhliu/demo/raw/main/test.csv" download>test.csv</a>. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/upload_files.png"/> </div> 2. Drag and drop your dataset files and add a brief descriptive commit message. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/commit_files.png"/> </div> 3. After uploading your dataset files, they are stored in your dataset repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/files_stored.png"/> </div> ### Create a Dataset card Adding a Dataset card is super valuable for helping users find your dataset and understand how to use it responsibly. 1. Click on **Create Dataset Card** to create a Dataset card. This button creates a `README.md` file in your repository. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/dataset_card.png"/> </div> 2. At the top, you'll see the **Metadata UI** with several fields to select from like license, language, and task categories. These are the most important tags for helping users discover your dataset on the Hub. When you select an option from each field, they'll be automatically added to the top of the dataset card. 
You can also look at the [Dataset Card specifications](https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1), which have a complete (but not required) set of tag options, such as `annotations_creators`, to help you choose the appropriate tags. <div class="flex justify-center"> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/datasets/metadata_ui.png"/> </div> 3. Click on the **Import dataset card template** link at the top of the editor to automatically create a dataset card template. Filling out the template is a great way to introduce your dataset to the community and help users understand how to use it. For a detailed example of what a good Dataset card should look like, take a look at the [CNN DailyMail Dataset card](https://huggingface.co/datasets/cnn_dailymail). ### Load dataset Once your dataset is stored on the Hub, anyone can load it with the [`load_dataset`] function: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") ``` ## Upload with Python Users who prefer to upload a dataset programmatically can use the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) library, which allows users to interact with the Hub from Python. 1. Begin by installing the library: ```bash pip install huggingface_hub ``` 2. To upload a dataset on the Hub in Python, you need to log in to your Hugging Face account: ```bash huggingface-cli login ``` 3. Use the [`push_to_hub()`](https://huggingface.co/docs/datasets/main/en/package_reference/main_classes#datasets.DatasetDict.push_to_hub) function to help you add, commit, and push a file to your repository: ```py >>> from datasets import load_dataset >>> dataset = load_dataset("stevhliu/demo") # dataset = dataset.map(...) # do all your processing here >>> dataset.push_to_hub("stevhliu/processed_demo") ``` To set your dataset as private, set the `private` parameter to `True`. This parameter only works when you are creating a repository for the first time. ```py >>> dataset.push_to_hub("stevhliu/private_processed_demo", private=True) ``` To add a new configuration (or subset) to a dataset or to add a new split (train/validation/test), please refer to the [`Dataset.push_to_hub`] documentation. ### Privacy A private dataset is only accessible by you. Similarly, if you share a dataset within your organization, then members of the organization can also access the dataset. Load a private dataset by providing your authentication token to the `token` parameter: ```py >>> from datasets import load_dataset # Load a private individual dataset >>> dataset = load_dataset("stevhliu/demo", token=True) # Load a private organization dataset >>> dataset = load_dataset("organization/dataset_name", token=True) ``` ## What's next? Congratulations, you've completed the tutorials! 🥳 From here, you can go on to: - Learn more about how to use 🤗 Datasets' other functions to [process your dataset](process). - [Stream large datasets](stream) without downloading them locally. - [Define your dataset splits and configurations](repository_structure) or write a [loading script](dataset_script) and share your dataset with the community. If you have any questions about 🤗 Datasets, feel free to join and ask the community on our [forum](https://discuss.huggingface.co/c/datasets/10).
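As a complement to the `push_to_hub()` workflow shown in this tutorial, the [huggingface_hub](https://huggingface.co/docs/huggingface_hub/index) client also exposes repository-level helpers for uploading the raw files themselves. The snippet below is an illustrative sketch rather than part of the steps above: it assumes you are already logged in, that a `train.csv` file exists locally, and that the repository name is only an example.

```py
>>> from huggingface_hub import HfApi

>>> api = HfApi()
>>> # Create the dataset repository if it doesn't exist yet
>>> api.create_repo("stevhliu/demo", repo_type="dataset", exist_ok=True)
>>> # Upload a single local file into the repository
>>> api.upload_file(
...     path_or_fileobj="train.csv",
...     path_in_repo="train.csv",
...     repo_id="stevhliu/demo",
...     repo_type="dataset",
... )
```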
datasets/docs/source/upload_dataset.mdx/0
{ "file_path": "datasets/docs/source/upload_dataset.mdx", "repo_id": "datasets", "token_count": 2010 }
173
# Metric Card for SQuAD v2 ## Metric description This metric wraps the official scoring script for version 2 of the [Stanford Question Answering Dataset (SQuAD)](https://huggingface.co/datasets/squad_v2). SQuAD is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable. SQuAD 2.0 combines the 100,000 questions in SQuAD 1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD 2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. ## How to use The metric takes two files or two lists - one representing model predictions and the other the references to compare them to. *Predictions*: List of dictionaries, one per question-answer pair to score, with the following key-value pairs: * `'id'`: the identification field of the question-answer pair * `'prediction_text'`: the text of the answer * `'no_answer_probability'`: the probability that the question has no answer *References*: List of question-answer dictionaries with the following key-value pairs: * `'id'`: id of the question-answer pair (see above), * `'answers'`: a list of Dict {'text': text of the answer as a string} * `'no_answer_threshold'`: the probability threshold to decide that a question has no answer. ```python from datasets import load_metric squad_metric = load_metric("squad_v2") results = squad_metric.compute(predictions=predictions, references=references) ``` ## Output values This metric outputs a dictionary with 13 values: * `'exact'`: Exact match (the normalized answer exactly matches the gold answer) (see the `exact_match` metric (forthcoming)) * `'f1'`: The average F1-score of predicted tokens versus the gold answer (see the [F1 score](https://huggingface.co/metrics/f1) metric) * `'total'`: Number of scores considered * `'HasAns_exact'`: Exact match (the normalized answer exactly matches the gold answer) * `'HasAns_f1'`: The F-score of predicted tokens versus the gold answer * `'HasAns_total'`: How many of the questions have answers * `'NoAns_exact'`: Exact match (the normalized answer exactly matches the gold answer) * `'NoAns_f1'`: The F-score of predicted tokens versus the gold answer * `'NoAns_total'`: How many of the questions have no answers * `'best_exact'`: Best exact match (with varying threshold) * `'best_exact_thresh'`: No-answer probability threshold associated with the best exact match * `'best_f1'`: Best F1 score (with varying threshold) * `'best_f1_thresh'`: No-answer probability threshold associated with the best F1 The range of `exact` is 0-100, where 0.0 means no answers were matched and 100.0 means all answers were matched. The range of `f1` is also 0-100 -- its lowest possible value is 0.0, if either the precision or the recall is 0, and its highest possible value is 100.0, which means perfect precision and recall. The range of `total` depends on the length of predictions/references: its minimal value is 0, and its maximal value is the total number of questions in the predictions and references. ### Values from popular papers The [SQuAD v2 paper](https://arxiv.org/pdf/1806.03822.pdf) reported an F1 score of 66.3% and an Exact Match score of 63.4%.
They also report that human performance on the dataset represents an F1 score of 89.5% and an Exact Match score of 86.9%. For more recent model performance, see the [dataset leaderboard](https://paperswithcode.com/dataset/squad). ## Examples Maximal values for both exact match and F1 (perfect match): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 100.0, 'f1': 100.0, 'total': 1, 'HasAns_exact': 100.0, 'HasAns_f1': 100.0, 'HasAns_total': 1, 'best_exact': 100.0, 'best_exact_thresh': 0.0, 'best_f1': 100.0, 'best_f1_thresh': 0.0} ``` Minimal values for both exact match and F1 (no match): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1999', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 0.0, 'f1': 0.0, 'total': 1, 'HasAns_exact': 0.0, 'HasAns_f1': 0.0, 'HasAns_total': 1, 'best_exact': 0.0, 'best_exact_thresh': 0.0, 'best_f1': 0.0, 'best_f1_thresh': 0.0} ``` Partial match (2 out of 3 answers correct): ```python from datasets import load_metric squad_v2_metric = load_metric("squad_v2") predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}, {'prediction_text': 'Beyonce', 'id': '56d2051ce7d4791d0090260b', 'no_answer_probability': 0.}, {'prediction_text': 'climate change', 'id': '5733b5344776f419006610e1', 'no_answer_probability': 0.}] references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}, {'answers': {'answer_start': [233], 'text': ['Beyoncé and Bruno Mars']}, 'id': '56d2051ce7d4791d0090260b'}, {'answers': {'answer_start': [891], 'text': ['climate change']}, 'id': '5733b5344776f419006610e1'}] results = squad_v2_metric.compute(predictions=predictions, references=references) results {'exact': 66.66666666666667, 'f1': 66.66666666666667, 'total': 3, 'HasAns_exact': 66.66666666666667, 'HasAns_f1': 66.66666666666667, 'HasAns_total': 3, 'best_exact': 66.66666666666667, 'best_exact_thresh': 0.0, 'best_f1': 66.66666666666667, 'best_f1_thresh': 0.0} ``` ## Limitations and bias This metric works only with datasets in the same format as the [SQuAD v.2 dataset](https://huggingface.co/datasets/squad_v2). The SQuAD datasets do contain a certain amount of noise, such as duplicate questions as well as missing answers, but these represent a minority of the 100,000 question-answer pairs. Also, neither exact match nor F1 score reflects whether models do better on certain types of questions (e.g. who questions) or those that cover a certain gender or geographical area -- carrying out more in-depth error analysis can complement these numbers.
## Citation ```bibtex @inproceedings{Rajpurkar2018SQuAD2, title={Know What You Don't Know: Unanswerable Questions for SQuAD}, author={Pranav Rajpurkar and Jian Zhang and Percy Liang}, booktitle={ACL 2018}, year={2018} } ``` ## Further References - [The Stanford Question Answering Dataset: Background, Challenges, Progress (blog post)](https://rajpurkar.github.io/mlx/qa-and-squad/) - [Hugging Face Course -- Question Answering](https://huggingface.co/course/chapter7/7)
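The same scorer is also published through the 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index) library, which is the recommended replacement for the deprecated `datasets.load_metric`. A minimal, equivalent sketch (assuming `evaluate` is installed; the inputs reuse the first example above):

```python
import evaluate

# Load the SQuAD v2 scorer through the Evaluate library instead of datasets.load_metric
squad_v2_metric = evaluate.load("squad_v2")
predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22', 'no_answer_probability': 0.}]
references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
print(squad_v2_metric.compute(predictions=predictions, references=references))
```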
datasets/metrics/squad_v2/README.md/0
{ "file_path": "datasets/metrics/squad_v2/README.md", "repo_id": "datasets", "token_count": 2372 }
174
<jupyter_start><jupyter_text>**⚠️ This notebook is deprecated in favor of the [Quickstart notebook](https://github.com/huggingface/notebooks/blob/main/datasets_doc/en/quickstart.ipynb)** HuggingFace 🤗 Datasets library - Quick overviewModels come and go (linear models, LSTM, Transformers, ...) but two core elements have consistently been the beating heart of Natural Language Processing: Datasets & Metrics🤗 Datasets is a fast and efficient library to easily share and load datasets, already providing access to the public datasets in the [Hugging Face Hub](https://huggingface.co/datasets).The library has several interesting features (besides easy access to datasets):- Built-in interoperability with PyTorch, Tensorflow 2, Pandas and Numpy- Lightweight and fast library with a transparent and pythonic API- Thrives on large datasets: frees you from RAM memory limits, all datasets are memory-mapped on drive by default.- Smart caching with an intelligent `tf.data`-like cache: never wait for your data to be processed several times🤗 Datasets originated from a fork of the awesome Tensorflow-Datasets and the HuggingFace team wants to deeply thank the team behind this amazing library and user API. We have tried to keep a layer of compatibility with `tfds` and can provide conversion from one format to the other.To learn more about how to use metrics, take a look at the library 🤗 [Evaluate](https://huggingface.co/docs/evaluate/index)! In addition to metrics, you can find more tools for evaluating models and datasets. Main datasets APIThis notebook is a quick dive into the main user API for loading datasets in `datasets`<jupyter_code># install datasets !pip install datasets # Let's import the library. We typically only need at most two methods: from datasets import list_datasets, load_dataset from pprint import pprint<jupyter_output><empty_output><jupyter_text>Listing the currently available datasets<jupyter_code># Currently available datasets datasets = list_datasets() print(f"🤩 Currently {len(datasets)} datasets are available on the hub:") pprint(datasets[:100] + [f"{len(datasets) - 100} more..."], compact=True) # You can access various attributes of the datasets before downloading them squad_dataset = list_datasets(with_details=True)[datasets.index('squad')] pprint(squad_dataset.__dict__) # It's a simple python dataclass<jupyter_output>{'_id': '621ffdd236468d709f181f95', 'author': None, 'cardData': {'annotations_creators': ['crowdsourced'], 'dataset_info': {'config_name': 'plain_text', 'dataset_size': 89789763, 'download_size': 35142551, 'features': [{'dtype': 'string', 'name': 'id'}, {'dtype': 'string', 'name': 'title'}, {'dtype': 'string', 'name': 'context'}, {'dtype': 'string', 'name': 'question'}, {'name': 'answers', 'sequence': [{'dtype': 'string', 'name': 'text'}, [...]<jupyter_text>An example with SQuAD<jupyter_code># Downloading and loading a dataset dataset = load_dataset('squad', split='validation[:10%]')<jupyter_output>WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453)<jupyter_text>This call to `datasets.load_dataset()` does the following steps under the hood:1. Download and import in the library the **SQuAD python processing script** from the HuggingFace AWS bucket if it's not already stored in the library. You can find the SQuAD processing script [here](https://github.com/huggingface/datasets/tree/master/datasets/squad/squad.py) for instance.
Processing scripts are small python scripts which define the info (citation, description) and format of the dataset and contain the URL to the original SQuAD JSON files and the code to load examples from the original SQuAD JSON files.2. Run the SQuAD python processing script which will: - **Download the SQuAD dataset** from the original URL (see the script) if it's not already downloaded and cached. - **Process and cache** all SQuAD in a structured Arrow table for each standard splits stored on the drive. Arrow table are arbitrarily long tables, typed with types that can be mapped to numpy/pandas/python standard types and can store nested objects. They can be directly access from drive, loaded in RAM or even streamed over the web. 3. Return a **dataset built from the splits** asked by the user (default: all); in the above example we create a dataset with the first 10% of the validation split.<jupyter_code># Informations on the dataset (description, citation, size, splits, format...) # are provided in `dataset.info` (a simple python dataclass) and also as direct attributes in the dataset object pprint(dataset.info.__dict__)<jupyter_output>{'builder_name': 'squad', 'citation': '@article{2016arXiv160605250R,\n' ' author = {{Rajpurkar}, Pranav and {Zhang}, Jian and ' '{Lopyrev},\n' ' Konstantin and {Liang}, Percy},\n' ' title = "{SQuAD: 100,000+ Questions for Machine ' 'Comprehension of Text}",\n' ' journal = {arXiv e-prints},\n' ' year = 2016,\n' ' eid = {arXiv:1606.05250},\n' ' pages = {arXiv:1606.05250},\n' 'archivePrefix = {arXiv},\n' ' eprint = {1606.05250},\n' '}\n', 'config_name': 'plain_text', 'dataset_size': 89819092, 'description': 'Stanford Question Answering Dataset (SQuAD) is a reading ' 'comprehension dataset, consisting of questions posed by ' 'crowdworkers on a set of Wikipedia articles, where the answer ' 'to every question is a segment of[...]<jupyter_text>Inspecting and using the dataset: elements, slices and columns The returned `Dataset` object is a memory mapped dataset that behaves similarly to a normal map-style dataset. 
It is backed by an Apache Arrow table which allows many interesting features.<jupyter_code>print(dataset)<jupyter_output>Dataset({ features: ['id', 'title', 'context', 'question', 'answers'], num_rows: 1057 })<jupyter_text>You can query it's length and get items or slices like you would do normally with a python mapping.<jupyter_code>print(f"👉 Dataset len(dataset): {len(dataset)}") print("\n👉 First item 'dataset[0]':") pprint(dataset[0]) # Or get slices with several examples: print("\n👉Slice of the two items 'dataset[10:12]':") pprint(dataset[10:12]) # You can get a full column of the dataset by indexing with its name as a string: print(dataset['question'][:10])<jupyter_output>['Which NFL team represented the AFC at Super Bowl 50?', 'Which NFL team represented the NFC at Super Bowl 50?', 'Where did Super Bowl 50 take place?', 'Which NFL team won Super Bowl 50?', 'What color was used to emphasize the 50th anniversary of the Super Bowl?', 'What was the theme of Super Bowl 50?', 'What day was the game played on?', 'What is the AFC short for?', 'What was the theme of Super Bowl 50?', 'What does AFC stand for?']<jupyter_text>The `__getitem__` method will return different format depending on the type of query:- Items like `dataset[0]` are returned as dict of elements.- Slices like `dataset[10:20]` are returned as dict of lists of elements.- Columns like `dataset['question']` are returned as a list of elements.This may seems surprising at first but in our experiments it's actually a lot easier to use for data processing than returning the same format for each of these views on the dataset. In particular, you can easily iterate along columns in slices, and also naturally permute consecutive indexings with identical results as showed here by permuting column indexing with elements and slices:<jupyter_code>print(dataset[0]['question'] == dataset['question'][0]) print(dataset[10:20]['context'] == dataset['context'][10:20])<jupyter_output>True True<jupyter_text>Dataset are internally typed and structuredThe dataset is backed by one (or several) Apache Arrow tables which are typed and allows for fast retrieval and access as well as arbitrary-size memory mapping.This means respectively that the format for the dataset is clearly defined and that you can load datasets of arbitrary size without worrying about RAM memory limitation (basically the dataset take no space in RAM, it's directly read from drive when needed with fast IO access).<jupyter_code># You can inspect the dataset column names and types print("Column names:") pprint(dataset.column_names) print("Features:") pprint(dataset.features)<jupyter_output>Column names: ['id', 'title', 'context', 'question', 'answers'] Features: {'answers': Sequence(feature={'answer_start': Value(dtype='int32', id=None), 'text': Value(dtype='string', id=None)}, length=-1, id=None), 'context': Value(dtype='string', id=None), 'id': Value(dtype='string', id=None), 'question': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None)}<jupyter_text>Additional misc properties<jupyter_code># Datasets also have shapes informations print("The number of rows", dataset.num_rows, "also available as len(dataset)", len(dataset)) print("The number of columns", dataset.num_columns) print("The shape (rows, columns)", dataset.shape)<jupyter_output>The number of rows 1057 also available as len(dataset) 1057 The number of columns 5 The shape (rows, columns) (1057, 5)<jupyter_text>Modifying the dataset with `dataset.map`Now that we know how to inspect our dataset we also 
want to update it. For that there is a powerful method `.map()` which is inspired by `tf.data` map method and that you can use to apply a function to each examples, independently or in batch.`.map()` takes a callable accepting a dict as argument (same dict as the one returned by `dataset[i]`) and iterate over the dataset by calling the function on each example.<jupyter_code># Let's print the length of each `context` string in our subset of the dataset # (10% of the validation i.e. 1057 examples) dataset.map(lambda example: print(len(example['context']), end=','))<jupyter_output><empty_output><jupyter_text>This is basically the same as doing```pythonfor example in dataset: function(example)``` The above examples was a bit verbose. We can control the logging level of 🤗 Datasets with it's logging module:<jupyter_code>from datasets import logging logging.set_verbosity_warning() dataset.map(lambda example: print(len(example['context']), end=',')) # Let's keep it verbose for our tutorial though from datasets import logging logging.set_verbosity_info()<jupyter_output><empty_output><jupyter_text>The above example had no effect on the dataset because the method we supplied to `.map()` didn't return a `dict` or a `abc.Mapping` that could be used to update the examples in the dataset.In such a case, `.map()` will return the same dataset (`self`).Now let's see how we can use a method that actually modify the dataset. Modifying the dataset example by example The main interest of `.map()` is to update and modify the content of the table and leverage smart caching and fast backend.To use `.map()` to update elements in the table you need to provide a function with the following signature: `function(example: dict) -> dict`.<jupyter_code># Let's add a prefix 'My cute title: ' to each of our titles def add_prefix_to_title(example): example['title'] = 'My cute title: ' + example['title'] return example prefixed_dataset = dataset.map(add_prefix_to_title) print(prefixed_dataset.unique('title')) # `.unique()` is a super fast way to print the unique elemnts in a column (see the doc for all the methods)<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-242ccd893f32bdf9.arrow<jupyter_text>This call to `.map()` compute and return the updated table. It will also store the updated table in a cache file indexed by the current state and the mapped function.A subsequent call to `.map()` (even in another python session) will reuse the cached file instead of recomputing the operation.You can test this by running again the previous cell, you will see that the result are directly loaded from the cache and not re-computed again.The updated dataset returned by `.map()` is (again) directly memory mapped from drive and not allocated in RAM. The function you provide to `.map()` should accept an input with the format of an item of the dataset: `function(dataset[0])` and return a python dict.The columns and type of the outputs can be different than the input dict. 
In this case the new keys will be added as additional columns in the dataset.Bascially each dataset example dict is updated with the dictionary returned by the function like this: `example.update(function(example))`.<jupyter_code># Since the input example dict is updated with our function output dict, # we can actually just return the updated 'title' field titled_dataset = dataset.map(lambda example: {'title': 'My cutest title: ' + example['title']}) print(titled_dataset.unique('title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-4f3eee21db868c87.arrow<jupyter_text>Removing columnsYou can also remove columns when running map with the `remove_columns=List[str]` argument.<jupyter_code># This will remove the 'title' column while doing the update (after having send it the the mapped function so you can use it in your function!) less_columns_dataset = dataset.map(lambda example: {'new_title': 'Wouhahh: ' + example['title']}, remove_columns=['title']) print(less_columns_dataset.column_names) print(less_columns_dataset.unique('new_title'))<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-2800c1727354fbe2.arrow<jupyter_text>Using examples indicesWith `with_indices=True`, dataset indices (from `0` to `len(dataset)`) will be supplied to the function which must thus have the following signature: `function(example: dict, indice: int) -> dict`<jupyter_code># This will add the index in the dataset to the 'question' field with_indices_dataset = dataset.map(lambda example, idx: {'question': f'{idx}: ' + example['question']}, with_indices=True) pprint(with_indices_dataset['question'][:5])<jupyter_output>WARNING:datasets.arrow_dataset:Loading cached processed dataset at /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453/cache-e23b98819de39aea.arrow<jupyter_text>Modifying the dataset with batched updates `.map()` can also work with batch of examples (slices of the dataset).This is particularly interesting if you have a function that can handle batch of inputs like the tokenizers of HuggingFace `tokenizers`.To work on batched inputs set `batched=True` when calling `.map()` and supply a function with the following signature: `function(examples: Dict[List]) -> Dict[List]` or, if you use indices, `function(examples: Dict[List], indices: List[int]) -> Dict[List]`).Bascially, your function should accept an input with the format of a slice of the dataset: `function(dataset[:10])`.<jupyter_code>!pip install transformers # Let's import a fast tokenizer that can work on batched inputs # (the 'Fast' tokenizers in HuggingFace) from transformers import BertTokenizerFast, logging as transformers_logging transformers_logging.set_verbosity_warning() tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') # Now let's batch tokenize our dataset 'context' encoded_dataset = dataset.map(lambda example: tokenizer(example['context']), batched=True) print("encoded_dataset[0]") pprint(encoded_dataset[0], compact=True) # we have added additional columns pprint(encoded_dataset.column_names) # Let show a more complex processing with the full preparation of the SQuAD dataset # for training a model from Transformers def 
convert_to_features(batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(batch['context'], batch['question'], truncation=True) # Compute start and end tokens for labels start_positions, end_positions = [], [] for i, answer in enumerate(batch['answers']): first_char = answer['answer_start'][0] last_char = first_char + len(answer['text'][0]) - 1 start_positions.append(encodings.char_to_token(i, first_char)) end_positions.append(encodings.char_to_token(i, last_char)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True) # Now our dataset comprise the labels for the start and end position # as well as the offsets for converting back tokens # in span of the original string for evaluation print("column_names", encoded_dataset.column_names) print("start_positions", encoded_dataset[:5]['start_positions'])<jupyter_output>column_names ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] start_positions [34, 45, 80, 34, 98]<jupyter_text>Image datasets Images are loaded using Pillow:<jupyter_code>image_dataset = load_dataset("cats_vs_dogs", split="train") image_dataset[0] image_dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Audio datasets Audio files are decoded using torchaudio or librosa using to the sampling rate of your choice.To read mp3 files you need ffmpeg and restart your runtime<jupyter_code>!add-apt-repository -y ppa:jonathonf/ffmpeg-4 && apt update && apt install -y ffmpeg from datasets import load_dataset audio_dataset = load_dataset("common_voice", "fi", split="train") audio_dataset[0] audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Audio decoding and resampling is done in-the-fly when accessing examples. You can change the sampling rate this way:<jupyter_code>from datasets import Audio audio_dataset = audio_dataset.cast_column("audio", Audio(sampling_rate=16_000)) audio_dataset[0]["audio"]["array"], audio_dataset[0]["audio"]["sampling_rate"]<jupyter_output><empty_output><jupyter_text>Formatting outputs for PyTorch, Tensorflow, Numpy, PandasNow that we have tokenized our inputs, we probably want to use this dataset in a `torch.Dataloader` or a `tf.data.Dataset`. There are various ways to approach this.Using the `set_format()` method, we can:- format the indexing (`__getitem__`) to return numpy/pytorch/tensorflow tensors, instead of python objects, and- format the indexing (`__getitem__`) to return only the subset of the columns that we need for our model inputs. We don't want the columns `id` or `title` as inputs to train our model, but we could still want to keep them in the dataset, for instance for the evaluation of the model. 
This is handled by the `.set_format(type: Union[None, str], columns: Union[None, str, List[str]])` where:- `type` define the return type for our dataset `__getitem__` method and is one of `[None, 'numpy', 'pandas', 'torch', 'tensorflow']` (`None` means return python objects), and- `columns` define the columns returned by `__getitem__` and takes the name of a column in the dataset or a list of columns to return (`None` means return all columns).<jupyter_code>columns_to_return = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Uncomment whichever one is appropriate for you # encoded_dataset.set_format(type='torch', columns=columns_to_return) encoded_dataset.set_format(type='tensorflow', columns=columns_to_return) # Our dataset indexing output is now ready for being used in a pytorch dataloader pprint(encoded_dataset[1], compact=True) # Note that the columns are not removed from the dataset, just not returned when calling __getitem__ # Similarly the inner type of the dataset is not changed to torch.Tensor, the conversion and filtering is done on-the-fly when querying the dataset print(encoded_dataset.column_names) # We can remove the formatting with `.reset_format()` # or, identically, a call to `.set_format()` with no arguments encoded_dataset.reset_format() pprint(encoded_dataset[1], compact=True) # The current format can be checked with `.format`, # which is a dict of the type and formatting pprint(encoded_dataset.format)<jupyter_output>{'columns': ['id', 'title', 'context', 'question', 'answers', 'input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'], 'format_kwargs': {}, 'output_all_columns': False, 'type': None}<jupyter_text>There is also a convenience method, `to_tf_dataset()`, for the creation of `tf.data.Dataset` objects directly from a HuggingFace `Dataset`. An example will be shown below - when using this method, it is sufficient to pass the `columns` argument and your `DataCollator` - make sure you set the `return_tensors` argument of your `DataCollator` to `tf` or `np`, though, because TensorFlow won't be happy if you start passing it PyTorch Tensors! Wrapping this all upLet's wrap this all up with the full code to load and prepare SQuAD for training a PyTorch or TensorFlow model from HuggingFace `transformers` library.<jupyter_code>!pip install transformers import torch from datasets import load_dataset from transformers import BertTokenizerFast # Load our training dataset and tokenizer dataset = load_dataset('squad') tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') def get_correct_alignement(context, answer): """ Some original examples in SQuAD have indices wrong by 1 or 2 character. We test and fix this here. 
""" gold_text = answer['text'][0] start_idx = answer['answer_start'][0] end_idx = start_idx + len(gold_text) if context[start_idx:end_idx] == gold_text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == gold_text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == gold_text: return start_idx-2, end_idx-2 # When the gold label is off by two character else: raise ValueError() # Tokenize our training dataset def convert_to_features(example_batch): # Tokenize contexts and questions (as pairs of inputs) encodings = tokenizer(example_batch['context'], example_batch['question'], truncation=True) # Compute start and end tokens for labels using Transformers's fast tokenizers alignement methods. start_positions, end_positions = [], [] for i, (context, answer) in enumerate(zip(example_batch['context'], example_batch['answers'])): start_idx, end_idx = get_correct_alignement(context, answer) start_positions.append(encodings.char_to_token(i, start_idx)) end_positions.append(encodings.char_to_token(i, end_idx-1)) encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings encoded_dataset = dataset.map(convert_to_features, batched=True)<jupyter_output>INFO:datasets.builder:No config specified, defaulting to the single config: squad/plain_text INFO:datasets.info:Loading Dataset Infos from /root/.cache/huggingface/modules/datasets_modules/datasets/squad/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 INFO:datasets.builder:Overwrite dataset info from restored data version if exists. INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453 WARNING:datasets.builder:Found cached dataset squad (/root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453) INFO:datasets.info:Loading Dataset info from /root/.cache/huggingface/datasets/squad/plain_text/1.0.0/d6ec3ceb99ca480ce37cdd35555d6cb2511d223b9150cce08a837ef62ffea453<jupyter_text>That's the end of the shared preprocessing! Next, for Torch, we set our dataset format and create a `dataloader`. If you're using TensorFlow, skip to the next block.<jupyter_code># Format our dataset to outputs torch.Tensor to train a pytorch model columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] encoded_dataset.set_format(type='torch', columns=columns) # Instantiate a PyTorch Dataloader around our dataset # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='pt') dataloader = torch.utils.data.DataLoader(encoded_dataset['train'], collate_fn=collate_fn, batch_size=8)<jupyter_output><empty_output><jupyter_text>For TensorFlow, we use the `to_tf_dataset()` method to get a `tf.data.Dataset`.<jupyter_code>columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] # Let's do dynamic batching (pad on the fly with our own collate_fn) def collate_fn(examples): return tokenizer.pad(examples, return_tensors='np') # to_tf_dataset() returns a tf.data.Dataset that we can pass straight to model.fit(). encoded_tf_dataset = encoded_dataset['train'].to_tf_dataset( columns=columns, collate_fn=collate_fn, batch_size=8, shuffle=True, )<jupyter_output>You're using a BertTokenizerFast tokenizer. 
Please note that with a fast tokenizer, using the `__call__` method is faster than using a method to encode the text followed by a call to the `pad` method to get a padded encoding.<jupyter_text>Next, we initialize our model. The next two blocks show model creation and training in Torch. For TensorFlow, skip ahead!<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import AutoModelForQuestionAnswering model = AutoModelForQuestionAnswering.from_pretrained('bert-base-cased', return_dict=True) optimizer = torch.optim.Adam(model.parameters(), lr=1e-5) # Now let's train our model device = 'cuda' if torch.cuda.is_available() else 'cpu' model.train().to(device) for i, batch in enumerate(dataloader): batch.to(device) outputs = model(**batch) loss = outputs.loss loss.backward() optimizer.step() model.zero_grad() print(f'Step {i} - loss: {loss:.3}') if i > 5: break<jupyter_output>Step 0 - loss: 5.65 Step 1 - loss: 5.63 Step 2 - loss: 5.18 Step 3 - loss: 5.6 Step 4 - loss: 5.29 Step 5 - loss: 5.51 Step 6 - loss: 5.49<jupyter_text>Next, we'll initialize and train our TensorFlow model. Note the lack of a loss argument when we `compile()` our model here! All Transformers models support computing loss internally. When no loss argument is provided, the model will use its internal loss - this is especially helpful for cases like QA models, when the loss can be quite complex.<jupyter_code># Let's load a pretrained Bert model and a simple optimizer from transformers import TFAutoModelForQuestionAnswering import tensorflow as tf model = TFAutoModelForQuestionAnswering.from_pretrained('bert-base-cased') # No loss argument! model.compile(optimizer=tf.keras.optimizers.Adam(1e-5))<jupyter_output>All model checkpoint layers were used when initializing TFBertForQuestionAnswering. Some layers of TFBertForQuestionAnswering were not initialized from the model checkpoint at bert-base-cased and are newly initialized: ['qa_outputs'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference. No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! To disable this behaviour please pass a loss argument, or explicitly pass `loss=None` if you do not want your model to compute a loss.<jupyter_text>Now that all the preprocessing is done, training is an extremely comforting single line of Keras. 
We stop training early with the `steps_per_epoch` argument - you should probably leave that one out of your actual production code!<jupyter_code>model.fit(encoded_tf_dataset, epochs=1, steps_per_epoch=3)<jupyter_output>3/3 [==============================] - 73s 927ms/step - loss: 5.5575<jupyter_text>Example with a NER metric: `seqeval`<jupyter_code>!pip install evaluate seqeval import evaluate ner_metric = evaluate.load('seqeval') references = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] predictions = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] ner_metric.compute(predictions=predictions, references=references)<jupyter_output>Looking in indexes: https://pypi.org/simple, https://us-python.pkg.dev/colab-wheels/public/simple/ Requirement already satisfied: evaluate in /usr/local/lib/python3.10/dist-packages (0.4.0) Requirement already satisfied: seqeval in /usr/local/lib/python3.10/dist-packages (1.2.2) Requirement already satisfied: datasets>=2.0.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.12.0) Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.22.4) Requirement already satisfied: dill in /usr/local/lib/python3.10/dist-packages (from evaluate) (0.3.6) Requirement already satisfied: pandas in /usr/local/lib/python3.10/dist-packages (from evaluate) (1.5.3) Requirement already satisfied: requests>=2.19.0 in /usr/local/lib/python3.10/dist-packages (from evaluate) (2.27.1) Requirement already satisfied: tqdm>=4.66.3 in /usr/local/lib/python3.10/dist-packages (from evaluate) (4.65.0) Requirement already satisfied: xxhash in /usr/local/lib/py[...]
datasets/notebooks/Overview.ipynb/0
{ "file_path": "datasets/notebooks/Overview.ipynb", "repo_id": "datasets", "token_count": 10406 }
175
import platform from argparse import ArgumentParser import fsspec import huggingface_hub import pandas import pyarrow from datasets import __version__ as version from datasets.commands import BaseDatasetsCLICommand def info_command_factory(_): return EnvironmentCommand() class EnvironmentCommand(BaseDatasetsCLICommand): @staticmethod def register_subcommand(parser: ArgumentParser): download_parser = parser.add_parser("env", help="Print relevant system environment info.") download_parser.set_defaults(func=info_command_factory) def run(self): info = { "`datasets` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "`huggingface_hub` version": huggingface_hub.__version__, "PyArrow version": pyarrow.__version__, "Pandas version": pandas.__version__, "`fsspec` version": fsspec.__version__, } print("\nCopy-and-paste the text below in your GitHub issue.\n") print(self.format_dict(info)) return info @staticmethod def format_dict(d): return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
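# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): the command is
# normally reached through the `datasets-cli env` entry point, but the class
# defined above can also be exercised directly, e.g. when debugging an
# environment report.
if __name__ == "__main__":
    # Prints the copy-and-paste report and also returns it as a dict.
    env_info = EnvironmentCommand().run()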
datasets/src/datasets/commands/env.py/0
{ "file_path": "datasets/src/datasets/commands/env.py", "repo_id": "datasets", "token_count": 476 }
176
import os import sys import warnings from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.download_config import DownloadConfig from ..table import array_cast from ..utils.file_utils import is_local_path, xopen from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: import PIL.Image from .features import FeatureType _IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None _NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">" # Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image _VALID_IMAGE_ARRAY_DTPYES = [ np.dtype("|b1"), np.dtype("|u1"), np.dtype("<u2"), np.dtype(">u2"), np.dtype("<i2"), np.dtype(">i2"), np.dtype("<u4"), np.dtype(">u4"), np.dtype("<i4"), np.dtype(">i4"), np.dtype("<f4"), np.dtype(">f4"), np.dtype("<f8"), np.dtype(">f8"), ] @dataclass class Image: """Image [`Feature`] to read image data from an image file. Input: The Image feature accepts as input: - A `str`: Absolute path to the image file (i.e. random access is allowed). - A `dict` with the keys: - `path`: String with relative path of the image file to the archive file. - `bytes`: Bytes of the image file. This is useful for archived files with sequential access. - An `np.ndarray`: NumPy array representing an image. - A `PIL.Image.Image`: PIL image object. Args: mode (`str`, *optional*): The mode to convert the image to. If `None`, the native mode of the image is used. decode (`bool`, defaults to `True`): Whether to decode the image data. If `False`, returns the underlying dictionary in the format `{"path": image_path, "bytes": image_bytes}`. Examples: ```py >>> from datasets import load_dataset, Image >>> ds = load_dataset("beans", split="train") >>> ds.features["image"] Image(decode=True, id=None) >>> ds[0]["image"] <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=500x500 at 0x15E52E7F0> >>> ds = ds.cast_column('image', Image(decode=False)) {'bytes': None, 'path': '/root/.cache/huggingface/datasets/downloads/extracted/b0a21163f78769a2cf11f58dfc767fb458fc7cea5c05dccc0144a2c0f0bc1292/train/healthy/healthy_train.85.jpg'} ``` """ mode: Optional[str] = None decode: bool = True id: Optional[str] = None # Automatically constructed dtype: ClassVar[str] = "PIL.Image.Image" pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()}) _type: str = field(default="Image", init=False, repr=False) def __call__(self): return self.pa_type def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict: """Encode example into a format for Arrow. Args: value (`str`, `np.ndarray`, `PIL.Image.Image` or `dict`): Data passed as input to Image feature. 
Returns: `dict` with "path" and "bytes" fields """ if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if isinstance(value, list): value = np.array(value) if isinstance(value, str): return {"path": value, "bytes": None} elif isinstance(value, bytes): return {"path": None, "bytes": value} elif isinstance(value, np.ndarray): # convert the image array to PNG/TIFF bytes return encode_np_array(value) elif isinstance(value, PIL.Image.Image): # convert the PIL image to bytes (default format is PNG/TIFF) return encode_pil_image(value) elif value.get("path") is not None and os.path.isfile(value["path"]): # we set "bytes": None to not duplicate the data if they're already available locally return {"bytes": None, "path": value.get("path")} elif value.get("bytes") is not None or value.get("path") is not None: # store the image bytes, and path is used to infer the image format using the file extension return {"bytes": value.get("bytes"), "path": value.get("path")} else: raise ValueError( f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." ) def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image": """Decode example image file into image data. Args: value (`str` or `dict`): A string with the absolute image file path, a dictionary with keys: - `path`: String with absolute or relative image file path. - `bytes`: The bytes of the image file. token_per_repo_id (`dict`, *optional*): To access and decode image files from private repositories on the Hub, you can pass a dictionary repo_id (`str`) -> token (`bool` or `str`). Returns: `PIL.Image.Image` """ if not self.decode: raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.") if config.PIL_AVAILABLE: import PIL.Image import PIL.ImageOps else: raise ImportError("To support decoding images, please install 'Pillow'.") if token_per_repo_id is None: token_per_repo_id = {} path, bytes_ = value["path"], value["bytes"] if bytes_ is None: if path is None: raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.") else: if is_local_path(path): image = PIL.Image.open(path) else: source_url = path.split("::")[-1] pattern = ( config.HUB_DATASETS_URL if source_url.startswith(config.HF_ENDPOINT) else config.HUB_DATASETS_HFFS_URL ) try: repo_id = string_to_dict(source_url, pattern)["repo_id"] token = token_per_repo_id.get(repo_id) except ValueError: token = None download_config = DownloadConfig(token=token) with xopen(path, "rb", download_config=download_config) as f: bytes_ = BytesIO(f.read()) image = PIL.Image.open(bytes_) else: image = PIL.Image.open(BytesIO(bytes_)) image.load() # to avoid "Too many open files" errors if image.getexif().get(PIL.Image.ExifTags.Base.Orientation) is not None: image = PIL.ImageOps.exif_transpose(image) if self.mode and self.mode != image.mode: image = image.convert(self.mode) return image def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]: """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary.""" from .features import Value return ( self if self.decode else { "bytes": Value("binary"), "path": Value("string"), } ) def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray: """Cast an Arrow array to the Image arrow storage type. 
The Arrow types that can be converted to the Image pyarrow storage type are: - `pa.string()` - it must contain the "path" data - `pa.binary()` - it must contain the image bytes - `pa.struct({"bytes": pa.binary()})` - `pa.struct({"path": pa.string()})` - `pa.struct({"bytes": pa.binary(), "path": pa.string()})` - order doesn't matter - `pa.list(*)` - it must contain the image array data Args: storage (`Union[pa.StringArray, pa.StructArray, pa.ListArray]`): PyArrow array to cast. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. """ if pa.types.is_string(storage.type): bytes_array = pa.array([None] * len(storage), type=pa.binary()) storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_binary(storage.type): path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("bytes") >= 0: bytes_array = storage.field("bytes") else: bytes_array = pa.array([None] * len(storage), type=pa.binary()) if storage.type.get_field_index("path") >= 0: path_array = storage.field("path") else: path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()) elif pa.types.is_list(storage.type): bytes_array = pa.array( [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), ) path_array = pa.array([None] * len(storage), type=pa.string()) storage = pa.StructArray.from_arrays( [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() ) return array_cast(storage, self.pa_type) def embed_storage(self, storage: pa.StructArray) -> pa.StructArray: """Embed image files into the Arrow array. Args: storage (`pa.StructArray`): PyArrow array to embed. Returns: `pa.StructArray`: Array in the Image arrow storage type, that is `pa.struct({"bytes": pa.binary(), "path": pa.string()})`. 
""" @no_op_if_value_is_null def path_to_bytes(path): with xopen(path, "rb") as f: bytes_ = f.read() return bytes_ bytes_array = pa.array( [ (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None for x in storage.to_pylist() ], type=pa.binary(), ) path_array = pa.array( [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), ) storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()) return array_cast(storage, self.pa_type) def list_image_compression_formats() -> List[str]: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") global _IMAGE_COMPRESSION_FORMATS if _IMAGE_COMPRESSION_FORMATS is None: PIL.Image.init() _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys())) return _IMAGE_COMPRESSION_FORMATS def image_to_bytes(image: "PIL.Image.Image") -> bytes: """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression.""" buffer = BytesIO() if image.format in list_image_compression_formats(): format = image.format else: format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF" image.save(buffer, format=format) return buffer.getvalue() def encode_pil_image(image: "PIL.Image.Image") -> dict: if hasattr(image, "filename") and image.filename != "": return {"path": image.filename, "bytes": None} else: return {"path": None, "bytes": image_to_bytes(image)} def encode_np_array(array: np.ndarray) -> dict: if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") dtype = array.dtype dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER dtype_kind = dtype.kind dtype_itemsize = dtype.itemsize dest_dtype = None # Multi-channel array case (only np.dtype("|u1") is allowed) if array.shape[2:]: if dtype_kind not in ["u", "i"]: raise TypeError( f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." ) dest_dtype = np.dtype("|u1") if dtype != dest_dtype: warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") # Exact match elif dtype in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = dtype else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually) while dtype_itemsize >= 1: dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize) if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES: dest_dtype = np.dtype(dtype_str) warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'") break else: dtype_itemsize //= 2 if dest_dtype is None: raise TypeError( f"Cannot downcast dtype {dtype} to a valid image dtype. 
Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" ) image = PIL.Image.fromarray(array.astype(dest_dtype)) return {"path": None, "bytes": image_to_bytes(image)} def objects_to_list_of_image_dicts( objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]], ) -> List[dict]: """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`.""" if config.PIL_AVAILABLE: import PIL.Image else: raise ImportError("To support encoding images, please install 'Pillow'.") if objs: _, obj = first_non_null_value(objs) if isinstance(obj, str): return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs] if isinstance(obj, np.ndarray): obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array) return [obj_to_image_dict_func(obj) for obj in objs] elif isinstance(obj, PIL.Image.Image): obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image) return [obj_to_image_dict_func(obj) for obj in objs] else: return objs else: return objs
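# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The Image
# feature is normally consumed through `datasets.Dataset`; the image path
# below is a placeholder, not a real asset:
#
#     from datasets import Dataset, Image
#
#     ds = Dataset.from_dict({"image": ["path/to/cat.png"]})
#     decoded = ds.cast_column("image", Image())              # decode to PIL images on access
#     raw = ds.cast_column("image", Image(decode=False))      # keep {"bytes", "path"} dicts instead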
datasets/src/datasets/features/image.py/0
{ "file_path": "datasets/src/datasets/features/image.py", "repo_id": "datasets", "token_count": 6979 }
177
import itertools from dataclasses import dataclass from typing import Optional import pyarrow as pa import datasets from datasets.table import table_cast logger = datasets.utils.logging.get_logger(__name__) @dataclass class ArrowConfig(datasets.BuilderConfig): """BuilderConfig for Arrow.""" features: Optional[datasets.Features] = None class Arrow(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = ArrowConfig def _info(self): return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") dl_manager.download_config.extract_on_the_fly = True data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] # Infer features is they are stoed in the arrow schema if self.info.features is None: for file in itertools.chain.from_iterable(files): with open(file, "rb") as f: self.info.features = datasets.Features.from_arrow_schema(pa.ipc.open_stream(f).schema) break splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.info.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.info.features.arrow_schema) return pa_table def _generate_tables(self, files): for file_idx, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: try: for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)): pa_table = pa.Table.from_batches([record_batch]) # Uncomment for debugging (will print the Arrow table size and elements) # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}") # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows))) yield f"{file_idx}_{batch_idx}", self._cast_table(pa_table) except ValueError as e: logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}") raise
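# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module): this builder is
# what `load_dataset("arrow", ...)` dispatches to. The glob pattern below is a
# placeholder:
#
#     from datasets import load_dataset
#
#     ds = load_dataset("arrow", data_files={"train": "data/*.arrow"}, split="train")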
datasets/src/datasets/packaged_modules/arrow/arrow.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/arrow/arrow.py", "repo_id": "datasets", "token_count": 1500 }
178
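The Arrow builder above leans on pyarrow's IPC stream reader: each file is opened in binary mode, pa.ipc.open_stream yields record batches, and each batch is wrapped into a pa.Table. A short self-contained sketch of those calls, with an illustrative local file name and assuming only that pyarrow is installed:

import pyarrow as pa

path = "example.arrow"  # illustrative file name
table = pa.table({"id": [1, 2, 3], "text": ["a", "b", "c"]})

# Write the table as an Arrow IPC stream, the on-disk format the builder expects.
with pa.OSFile(path, "wb") as sink:
    with pa.ipc.new_stream(sink, table.schema) as writer:
        writer.write_table(table)

# Read it back one record batch at a time, much as _generate_tables does.
with open(path, "rb") as f:
    for batch_idx, record_batch in enumerate(pa.ipc.open_stream(f)):
        batch_table = pa.Table.from_batches([record_batch])
        print(batch_idx, batch_table.num_rows, batch_table.schema.names)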
import itertools import warnings from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class PandasConfig(datasets.BuilderConfig): """BuilderConfig for Pandas.""" features: Optional[datasets.Features] = None class Pandas(datasets.ArrowBasedBuilder): BUILDER_CONFIG_CLASS = PandasConfig def _info(self): warnings.warn( "The Pandas builder is deprecated and will be removed in the next major version of datasets.", FutureWarning, ) return datasets.DatasetInfo(features=self.config.features) def _split_generators(self, dl_manager): """We handle string, list and dicts in datafiles""" if not self.config.data_files: raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}") data_files = dl_manager.download_and_extract(self.config.data_files) if isinstance(data_files, (str, list, tuple)): files = data_files if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})] splits = [] for split_name, files in data_files.items(): if isinstance(files, str): files = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive files = [dl_manager.iter_files(file) for file in files] splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files})) return splits def _cast_table(self, pa_table: pa.Table) -> pa.Table: if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example pa_table = table_cast(pa_table, self.config.features.arrow_schema) return pa_table def _generate_tables(self, files): for i, file in enumerate(itertools.chain.from_iterable(files)): with open(file, "rb") as f: pa_table = pa.Table.from_pandas(pd.read_pickle(f)) yield i, self._cast_table(pa_table)
datasets/src/datasets/packaged_modules/pandas/pandas.py/0
{ "file_path": "datasets/src/datasets/packaged_modules/pandas/pandas.py", "repo_id": "datasets", "token_count": 1011 }
179
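The Pandas builder's _generate_tables boils down to pd.read_pickle followed by pa.Table.from_pandas. A minimal sketch of that step with an illustrative file name; keep in mind that unpickling is only safe for files you trust.

import pandas as pd
import pyarrow as pa

path = "example.pkl"  # illustrative file name
pd.DataFrame({"id": [1, 2], "text": ["a", "b"]}).to_pickle(path)

# One iteration of the builder's loop: unpickle the DataFrame, hand it to Arrow.
with open(path, "rb") as f:
    pa_table = pa.Table.from_pandas(pd.read_pickle(f))

print(pa_table.num_rows, pa_table.schema.names)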
import importlib import inspect from functools import wraps from typing import TYPE_CHECKING, Optional from .download.download_config import DownloadConfig from .utils.file_utils import ( xbasename, xdirname, xet_parse, xexists, xgetsize, xglob, xgzip_open, xisdir, xisfile, xjoin, xlistdir, xnumpy_load, xopen, xpandas_read_csv, xpandas_read_excel, xPath, xpyarrow_parquet_read_table, xrelpath, xsio_loadmat, xsplit, xsplitext, xwalk, xxml_dom_minidom_parse, ) from .utils.logging import get_logger from .utils.patching import patch_submodule from .utils.py_utils import get_imports, lock_importable_file logger = get_logger(__name__) if TYPE_CHECKING: from .builder import DatasetBuilder def extend_module_for_streaming(module_path, download_config: Optional[DownloadConfig] = None): """Extend the module to support streaming. We patch some functions in the module to use `fsspec` to support data streaming: - We use `fsspec.open` to open and read remote files. We patch the module function: - `open` - We use the "::" hop separator to join paths and navigate remote compressed/archive files. We patch the module functions: - `os.path.join` - `pathlib.Path.joinpath` and `pathlib.Path.__truediv__` (called when using the "/" operator) The patched functions are replaced with custom functions defined to work with the :class:`~download.streaming_download_manager.StreamingDownloadManager`. Args: module_path: Path to the module to be extended. download_config : mainly use use_auth_token or storage_options to support different platforms and auth types. """ module = importlib.import_module(module_path) # TODO(QL): always update the module to add subsequent new authentication without removing old ones if hasattr(module, "_patched_for_streaming") and module._patched_for_streaming: if isinstance(module._patched_for_streaming, DownloadConfig): module._patched_for_streaming.token = download_config.token module._patched_for_streaming.storage_options = download_config.storage_options return def wrap_auth(function): @wraps(function) def wrapper(*args, **kwargs): return function(*args, download_config=download_config, **kwargs) wrapper._decorator_name_ = "wrap_auth" return wrapper # open files in a streaming fashion patch_submodule(module, "open", wrap_auth(xopen)).start() patch_submodule(module, "os.listdir", wrap_auth(xlistdir)).start() patch_submodule(module, "os.walk", wrap_auth(xwalk)).start() patch_submodule(module, "glob.glob", wrap_auth(xglob)).start() # allow to navigate in remote zip files patch_submodule(module, "os.path.join", xjoin).start() patch_submodule(module, "os.path.dirname", xdirname).start() patch_submodule(module, "os.path.basename", xbasename).start() patch_submodule(module, "os.path.relpath", xrelpath).start() patch_submodule(module, "os.path.split", xsplit).start() patch_submodule(module, "os.path.splitext", xsplitext).start() # allow checks on paths patch_submodule(module, "os.path.exists", wrap_auth(xexists)).start() patch_submodule(module, "os.path.isdir", wrap_auth(xisdir)).start() patch_submodule(module, "os.path.isfile", wrap_auth(xisfile)).start() patch_submodule(module, "os.path.getsize", wrap_auth(xgetsize)).start() patch_submodule(module, "pathlib.Path", xPath).start() # file readers patch_submodule(module, "gzip.open", wrap_auth(xgzip_open)).start() patch_submodule(module, "numpy.load", wrap_auth(xnumpy_load)).start() patch_submodule(module, "pandas.read_csv", wrap_auth(xpandas_read_csv), attrs=["__version__"]).start() patch_submodule(module, "pandas.read_excel", 
wrap_auth(xpandas_read_excel), attrs=["__version__"]).start() patch_submodule(module, "scipy.io.loadmat", wrap_auth(xsio_loadmat), attrs=["__version__"]).start() patch_submodule(module, "xml.etree.ElementTree.parse", wrap_auth(xet_parse)).start() patch_submodule(module, "xml.dom.minidom.parse", wrap_auth(xxml_dom_minidom_parse)).start() # pyarrow: do not patch pyarrow attribute in packaged modules if not module.__name__.startswith("datasets.packaged_modules."): patch_submodule(module, "pyarrow.parquet.read_table", wrap_auth(xpyarrow_parquet_read_table)).start() module._patched_for_streaming = download_config def extend_dataset_builder_for_streaming(builder: "DatasetBuilder"): """Extend the dataset builder module and the modules imported by it to support streaming. Args: builder (:class:`DatasetBuilder`): Dataset builder instance. """ # this extends the open and os.path.join functions for data streaming download_config = DownloadConfig(storage_options=builder.storage_options, token=builder.token) extend_module_for_streaming(builder.__module__, download_config=download_config) # if needed, we also have to extend additional internal imports (like wmt14 -> wmt_utils) if not builder.__module__.startswith("datasets."): # check that it's not a packaged builder like csv importable_file = inspect.getfile(builder.__class__) with lock_importable_file(importable_file): for imports in get_imports(importable_file): if imports[0] == "internal": internal_import_name = imports[1] internal_module_name = ".".join(builder.__module__.split(".")[:-1] + [internal_import_name]) extend_module_for_streaming(internal_module_name, download_config=download_config) # builders can inherit from other builders that might use streaming functionality # (for example, ImageFolder and AudioFolder inherit from FolderBuilder which implements examples generation) # but these parents builders are not patched automatically as they are not instantiated, so we patch them here from .builder import DatasetBuilder parent_builder_modules = [ cls.__module__ for cls in type(builder).__mro__[1:] # make sure it's not the same module we've already patched if issubclass(cls, DatasetBuilder) and cls.__module__ != DatasetBuilder.__module__ ] # check it's not a standard builder from datasets.builder for module in parent_builder_modules: extend_module_for_streaming(module, download_config=download_config)
datasets/src/datasets/streaming.py/0
{ "file_path": "datasets/src/datasets/streaming.py", "repo_id": "datasets", "token_count": 2361 }
180
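The streaming patches above swap open, os.path.join and friends for fsspec-backed versions so that a chained URL like zip://member::https://host/archive.zip can be read without downloading and extracting the whole archive. The sketch below shows only the underlying fsspec behaviour with a locally created archive; the file names are illustrative, and it assumes fsspec is installed and a POSIX path layout.

import os
import zipfile

import fsspec

archive_path = os.path.abspath("archive.zip")  # illustrative local archive

# Build a tiny zip so the example runs offline.
with zipfile.ZipFile(archive_path, "w") as zf:
    zf.writestr("data.txt", "hello from inside the archive\n")

# Chained URL: the part after "::" locates the archive, the part before it names
# a member inside that archive; fsspec opens it without extracting anything.
with fsspec.open(f"zip://data.txt::file://{archive_path}", "rt") as f:
    print(f.read())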
import enum import inspect import warnings from functools import wraps from typing import Callable, Optional from .logging import get_logger _emitted_deprecation_warnings = set() logger = get_logger(__name__) def deprecated(help_message: Optional[str] = None): """Decorator to mark a class or a function as deprecated. Args: help_message (:obj:`str`, optional): An optional message to guide the user on how to switch to non-deprecated usage of the library. """ def decorator(deprecated_class_or_function: Callable): global _emitted_deprecation_warnings if inspect.isclass(deprecated_class_or_function): deprecated_function = deprecated_class_or_function.__init__ name = deprecated_class_or_function.__name__ else: deprecated_function = deprecated_class_or_function name = deprecated_function.__name__ # Support deprecating __init__ class method: class name instead name = name if name != "__init__" else deprecated_function.__qualname__.split(".")[-2] warning_msg = ( f"{name} is deprecated and will be removed in the next major version of datasets." + (f" {help_message}" if help_message else "") ) @wraps(deprecated_function) def wrapper(*args, **kwargs): func_hash = hash(deprecated_function) if func_hash not in _emitted_deprecation_warnings: warnings.warn(warning_msg, category=FutureWarning, stacklevel=2) _emitted_deprecation_warnings.add(func_hash) return deprecated_function(*args, **kwargs) wrapper._decorator_name_ = "deprecated" if inspect.isclass(deprecated_class_or_function): deprecated_class_or_function.__init__ = wrapper return deprecated_class_or_function else: return wrapper return decorator class OnAccess(enum.EnumMeta): """ Enum metaclass that calls a user-specified function whenever a member is accessed. """ def __getattribute__(cls, name): obj = super().__getattribute__(name) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj def __getitem__(cls, name): member = super().__getitem__(name) if member._on_access: member._on_access() return member def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): obj = super().__call__(value, names, module=module, qualname=qualname, type=type, start=start) if isinstance(obj, enum.Enum) and obj._on_access: obj._on_access() return obj class DeprecatedEnum(enum.Enum, metaclass=OnAccess): """ Enum class that calls `deprecate` method whenever a member is accessed. """ def __new__(cls, value): member = object.__new__(cls) member._value_ = value member._on_access = member.deprecate return member @property def help_message(self): return "" def deprecate(self): help_message = f" {self.help_message}" if self.help_message else "" warnings.warn( f"'{self.__objclass__.__name__}' is deprecated and will be removed in the next major version of datasets." + help_message, FutureWarning, stacklevel=3, )
datasets/src/datasets/utils/deprecation_utils.py/0
{ "file_path": "datasets/src/datasets/utils/deprecation_utils.py", "repo_id": "datasets", "token_count": 1426 }
181
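The deprecated decorator above warns once per wrapped function by remembering its hash in a module-level set. Below is a standalone re-implementation of that warn-once idea in miniature, not the library's API; the function names are illustrative.

import warnings
from functools import wraps

_already_warned = set()


def deprecated(help_message=""):
    def decorator(func):
        msg = f"{func.__name__} is deprecated."
        if help_message:
            msg += f" {help_message}"

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Warn only the first time this particular function is called.
            if func not in _already_warned:
                warnings.warn(msg, FutureWarning, stacklevel=2)
                _already_warned.add(func)
            return func(*args, **kwargs)

        return wrapper

    return decorator


@deprecated("Use new_add() instead.")
def old_add(a, b):
    return a + b


warnings.simplefilter("always")
print(old_add(1, 2))  # emits the FutureWarning once
print(old_add(3, 4))  # silent: already warned for old_add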
{ "code": "Programming language (C++, Java, Javascript, Python, etc.)", "aa": "Afar", "aaa": "Ghotuo", "aab": "Alumu-Tesu", "aac": "Ari", "aad": "Amal", "aae": "Arbëreshë Albanian", "aaf": "Aranadan", "aag": "Ambrak", "aah": "Abu' Arapesh", "aai": "Arifama-Miniafia", "aak": "Ankave", "aal": "Afade", "aan": "Anambé", "aao": "Algerian Saharan Arabic", "aap": "Pará Arára", "aaq": "Eastern Abnaki", "aas": "Aasáx", "aat": "Arvanitika Albanian", "aau": "Abau", "aav": "Austro-Asiatic languages", "aaw": "Solong", "aax": "Mandobo Atas", "aaz": "Amarasi", "ab": "Abkhazian", "aba": "Abé", "abb": "Bankon", "abc": "Ambala Ayta", "abd": "Manide", "abe": "Western Abnaki", "abf": "Abai Sungai", "abg": "Abaga", "abh": "Tajiki Arabic", "abi": "Abidji", "abj": "Aka-Bea", "abl": "Lampung Nyo", "abm": "Abanyom", "abn": "Abua", "abo": "Abon", "abp": "Abellen Ayta", "abq": "Abaza", "abr": "Abron", "abs": "Ambonese Malay", "abt": "Ambulas", "abu": "Abure", "abv": "Baharna Arabic", "abw": "Pal", "abx": "Inabaknon", "aby": "Aneme Wake", "abz": "Abui", "aca": "Achagua", "acb": "Áncá", "acd": "Gikyode", "ace": "Achinese", "acf": "Saint Lucian Creole French", "ach": "Acoli", "aci": "Aka-Cari", "ack": "Aka-Kora", "acl": "Akar-Bale", "acm": "Mesopotamian Arabic", "acn": "Achang", "acp": "Eastern Acipa", "acq": "Ta'izzi-Adeni Arabic", "acr": "Achi", "acs": "Acroá", "act": "Achterhoeks", "acu": "Achuar-Shiwiar", "acv": "Achumawi", "acw": "Hijazi Arabic", "acx": "Omani Arabic", "acy": "Cypriot Arabic", "acz": "Acheron", "ada": "Adangme", "adb": "Atauran", "add": "Lidzonka; Dzodinka", "ade": "Adele", "adf": "Dhofari Arabic", "adg": "Andegerebinha", "adh": "Adhola", "adi": "Adi", "adj": "Adioukrou", "adl": "Galo", "adn": "Adang", "ado": "Abu", "adq": "Adangbe", "adr": "Adonara", "ads": "Adamorobe Sign Language", "adt": "Adnyamathanha", "adu": "Aduge", "adw": "Amundava", "adx": "Amdo Tibetan", "ady": "Adyghe; Adygei", "adz": "Adzera", "ae": "Avestan", "aea": "Areba", "aeb": "Tunisian Arabic", "aec": "Saidi Arabic", "aed": "Argentine Sign Language", "aee": "Northeast Pashai; Northeast Pashayi", "aek": "Haeke", "ael": "Ambele", "aem": "Arem", "aen": "Armenian Sign Language", "aeq": "Aer", "aer": "Eastern Arrernte", "aes": "Alsea", "aeu": "Akeu", "aew": "Ambakich", "aey": "Amele", "aez": "Aeka", "af": "Afrikaans", "afa": "Afro-Asiatic languages", "afb": "Gulf Arabic", "afd": "Andai", "afe": "Putukwam", "afg": "Afghan Sign Language", "afh": "Afrihili", "afi": "Akrukay; Chini", "afk": "Nanubae", "afn": "Defaka", "afo": "Eloyi", "afp": "Tapei", "afs": "Afro-Seminole Creole", "aft": "Afitti", "afu": "Awutu", "afz": "Obokuitai", "aga": "Aguano", "agb": "Legbo", "agc": "Agatu", "agd": "Agarabi", "age": "Angal", "agf": "Arguni", "agg": "Angor", "agh": "Ngelima", "agi": "Agariya", "agj": "Argobba", "agk": "Isarog Agta", "agl": "Fembe", "agm": "Angaataha", "agn": "Agutaynen", "ago": "Tainae", "agq": "Aghem", "agr": "Aguaruna", "ags": "Esimbi", "agt": "Central Cagayan Agta", "agu": "Aguacateco", "agv": "Remontado Dumagat", "agw": "Kahua", "agx": "Aghul", "agy": "Southern Alta", "agz": "Mt. 
Iriga Agta", "aha": "Ahanta", "ahb": "Axamb", "ahg": "Qimant", "ahh": "Aghu", "ahi": "Tiagbamrin Aizi", "ahk": "Akha", "ahl": "Igo", "ahm": "Mobumrin Aizi", "ahn": "Àhàn", "aho": "Ahom", "ahp": "Aproumu Aizi", "ahr": "Ahirani", "ahs": "Ashe", "aht": "Ahtena", "aia": "Arosi", "aib": "Ainu (China)", "aic": "Ainbai", "aid": "Alngith", "aie": "Amara", "aif": "Agi", "aig": "Antigua and Barbuda Creole English", "aih": "Ai-Cham", "aii": "Assyrian Neo-Aramaic", "aij": "Lishanid Noshan", "aik": "Ake", "ail": "Aimele", "aim": "Aimol", "ain": "Ainu (Japan)", "aio": "Aiton", "aip": "Burumakok", "aiq": "Aimaq", "air": "Airoran", "ait": "Arikem", "aiw": "Aari", "aix": "Aighon", "aiy": "Ali", "aja": "Aja (South Sudan)", "ajg": "Aja (Benin)", "aji": "Ajië", "ajn": "Andajin", "ajp": "South Levantine Arabic", "ajs": "Algerian Jewish Sign Language", "aju": "Judeo-Moroccan Arabic", "ajw": "Ajawa", "ajz": "Amri Karbi", "ak": "Akan", "akb": "Batak Angkola", "akc": "Mpur", "akd": "Ukpet-Ehom", "ake": "Akawaio", "akf": "Akpa", "akg": "Anakalangu", "akh": "Angal Heneng", "aki": "Aiome", "akj": "Aka-Jeru", "akk": "Akkadian", "akl": "Aklanon", "akm": "Aka-Bo", "ako": "Akurio", "akp": "Siwu", "akq": "Ak", "akr": "Araki", "aks": "Akaselem", "akt": "Akolet", "aku": "Akum", "akv": "Akhvakh", "akw": "Akwa", "akx": "Aka-Kede", "aky": "Aka-Kol", "akz": "Alabama", "ala": "Alago", "alc": "Qawasqar", "ald": "Alladian", "ale": "Aleut", "alf": "Alege", "alg": "Algonquian languages", "alh": "Alawa", "ali": "Amaimon", "alj": "Alangan", "alk": "Alak", "all": "Allar", "alm": "Amblong", "aln": "Gheg Albanian", "alo": "Larike-Wakasihu", "alp": "Alune", "alq": "Algonquin", "alr": "Alutor", "als": "Tosk Albanian", "alt": "Southern Altai", "alu": "'Are'are", "alv": "Atlantic-Congo languages", "alw": "Alaba-K’abeena; Wanbasana", "alx": "Amol", "aly": "Alyawarr", "alz": "Alur", "am": "Amharic", "ama": "Amanayé", "amb": "Ambo", "amc": "Amahuaca", "ame": "Yanesha'", "amf": "Hamer-Banna", "amg": "Amurdak", "ami": "Amis", "amj": "Amdang", "amk": "Ambai", "aml": "War-Jaintia", "amm": "Ama (Papua New Guinea)", "amn": "Amanab", "amo": "Amo", "amp": "Alamblak", "amq": "Amahai", "amr": "Amarakaeri", "ams": "Southern Amami-Oshima", "amt": "Amto", "amu": "Guerrero Amuzgo", "amv": "Ambelau", "amw": "Western Neo-Aramaic", "amx": "Anmatyerre", "amy": "Ami", "amz": "Atampaya", "an": "Aragonese", "ana": "Andaqui", "anb": "Andoa", "anc": "Ngas", "and": "Ansus", "ane": "Xârâcùù", "anf": "Animere", "ang": "Old English (ca. 
450-1100)", "anh": "Nend", "ani": "Andi", "anj": "Anor", "ank": "Goemai", "anl": "Anu-Hkongso Chin", "anm": "Anal", "ann": "Obolo", "ano": "Andoque", "anp": "Angika", "anq": "Jarawa (India)", "anr": "Andh", "ans": "Anserma", "ant": "Antakarinya; Antikarinya", "anu": "Anuak", "anv": "Denya", "anw": "Anaang", "anx": "Andra-Hus", "any": "Anyin", "anz": "Anem", "aoa": "Angolar", "aob": "Abom", "aoc": "Pemon", "aod": "Andarum", "aoe": "Angal Enen", "aof": "Bragat", "aog": "Angoram", "aoi": "Anindilyakwa", "aoj": "Mufian", "aok": "Arhö", "aol": "Alor", "aom": "Ömie", "aon": "Bumbita Arapesh", "aor": "Aore", "aos": "Taikat", "aot": "Atong (India); A'tong", "aou": "A'ou", "aox": "Atorada", "aoz": "Uab Meto", "apa": "Apache languages", "apb": "Sa'a", "apc": "North Levantine Arabic", "apd": "Sudanese Arabic", "ape": "Bukiyip", "apf": "Pahanan Agta", "apg": "Ampanang", "aph": "Athpariya", "api": "Apiaká", "apj": "Jicarilla Apache", "apk": "Kiowa Apache", "apl": "Lipan Apache", "apm": "Mescalero-Chiricahua Apache", "apn": "Apinayé", "apo": "Ambul", "app": "Apma", "apq": "A-Pucikwar", "apr": "Arop-Lokep", "aps": "Arop-Sissano", "apt": "Apatani", "apu": "Apurinã", "apv": "Alapmunte", "apw": "Western Apache", "apx": "Aputai", "apy": "Apalaí", "apz": "Safeyoka", "aqa": "Alacalufan languages", "aqc": "Archi", "aqd": "Ampari Dogon", "aqg": "Arigidi", "aqk": "Aninka", "aql": "Algic languages", "aqm": "Atohwaim", "aqn": "Northern Alta", "aqp": "Atakapa", "aqr": "Arhâ", "aqt": "Angaité", "aqz": "Akuntsu", "ar": "Arabic", "arb": "Standard Arabic", "arc": "Official Aramaic (700-300 BCE); Imperial Aramaic (700-300 BCE)", "ard": "Arabana", "are": "Western Arrarnta", "arh": "Arhuaco", "ari": "Arikara", "arj": "Arapaso", "ark": "Arikapú", "arl": "Arabela", "arn": "Mapudungun; Mapuche", "aro": "Araona", "arp": "Arapaho", "arq": "Algerian Arabic", "arr": "Karo (Brazil)", "ars": "Najdi Arabic", "art": "Artificial languages", "aru": "Aruá (Amazonas State); Arawá", "arv": "Arbore", "arw": "Arawak", "arx": "Aruá (Rodonia State)", "ary": "Moroccan Arabic", "arz": "Egyptian Arabic", "as": "Assamese", "asa": "Asu (Tanzania)", "asb": "Assiniboine", "asc": "Casuarina Coast Asmat", "ase": "American Sign Language", "asf": "Auslan; Australian Sign Language", "asg": "Cishingini", "ash": "Abishira", "asi": "Buruwai", "asj": "Sari", "ask": "Ashkun", "asl": "Asilulu", "asn": "Xingú Asuriní", "aso": "Dano", "asp": "Algerian Sign Language", "asq": "Austrian Sign Language", "asr": "Asuri", "ass": "Ipulo", "ast": "Asturian; Asturleonese; Bable; Leonese", "asu": "Tocantins Asurini", "asv": "Asoa", "asw": "Australian Aborigines Sign Language", "asx": "Muratayak", "asy": "Yaosakor Asmat", "asz": "As", "ata": "Pele-Ata", "atb": "Zaiwa", "atc": "Atsahuaca", "atd": "Ata Manobo", "ate": "Atemble", "atg": "Ivbie North-Okpela-Arhe", "ath": "Athapascan languages", "ati": "Attié", "atj": "Atikamekw", "atk": "Ati", "atl": "Mt. 
Iraya Agta", "atm": "Ata", "atn": "Ashtiani", "ato": "Atong (Cameroon)", "atp": "Pudtol Atta", "atq": "Aralle-Tabulahan", "atr": "Waimiri-Atroari", "ats": "Gros Ventre", "att": "Pamplona Atta", "atu": "Reel", "atv": "Northern Altai", "atw": "Atsugewi", "atx": "Arutani", "aty": "Aneityum", "atz": "Arta", "aua": "Asumboa", "aub": "Alugu", "auc": "Waorani", "aud": "Anuta", "auf": "Arauan languages", "aug": "Aguna", "auh": "Aushi", "aui": "Anuki", "auj": "Awjilah", "auk": "Heyo", "aul": "Aulua", "aum": "Asu (Nigeria)", "aun": "Molmo One", "auo": "Auyokawa", "aup": "Makayam", "auq": "Anus; Korur", "aur": "Aruek", "aus": "Australian languages", "aut": "Austral", "auu": "Auye", "auw": "Awyi", "aux": "Aurá", "auy": "Awiyaana", "auz": "Uzbeki Arabic", "av": "Avaric", "avb": "Avau", "avd": "Alviri-Vidari", "avi": "Avikam", "avk": "Kotava", "avl": "Eastern Egyptian Bedawi Arabic", "avm": "Angkamuthi", "avn": "Avatime", "avo": "Agavotaguerra", "avs": "Aushiri", "avt": "Au", "avu": "Avokaya", "avv": "Avá-Canoeiro", "awa": "Awadhi", "awb": "Awa (Papua New Guinea)", "awc": "Cicipu", "awd": "Arawakan languages", "awe": "Awetí", "awg": "Anguthimri", "awh": "Awbono", "awi": "Aekyom", "awk": "Awabakal", "awm": "Arawum", "awn": "Awngi", "awo": "Awak", "awr": "Awera", "aws": "South Awyu", "awt": "Araweté", "awu": "Central Awyu", "awv": "Jair Awyu", "aww": "Awun", "awx": "Awara", "awy": "Edera Awyu", "axb": "Abipon", "axe": "Ayerrerenge", "axg": "Mato Grosso Arára", "axk": "Yaka (Central African Republic)", "axl": "Lower Southern Aranda", "axm": "Middle Armenian", "axx": "Xârâgurè", "ay": "Aymara", "aya": "Awar", "ayb": "Ayizo Gbe", "ayc": "Southern Aymara", "ayd": "Ayabadhu", "aye": "Ayere", "ayg": "Ginyanga", "ayh": "Hadrami Arabic", "ayi": "Leyigha", "ayk": "Akuku", "ayl": "Libyan Arabic", "ayn": "Sanaani Arabic", "ayo": "Ayoreo", "ayp": "North Mesopotamian Arabic", "ayq": "Ayi (Papua New Guinea)", "ayr": "Central Aymara", "ays": "Sorsogon Ayta", "ayt": "Magbukun Ayta", "ayu": "Ayu", "ayz": "Mai Brat", "az": "Azerbaijani", "aza": "Azha", "azb": "South Azerbaijani", "azc": "Uto-Aztecan languages", "azd": "Eastern Durango Nahuatl", "azg": "San Pedro Amuzgos Amuzgo", "azj": "North Azerbaijani", "azm": "Ipalapa Amuzgo", "azn": "Western Durango Nahuatl", "azo": "Awing", "azt": "Faire Atta", "azz": "Highland Puebla Nahuatl", "ba": "Bashkir", "baa": "Babatana", "bab": "Bainouk-Gunyuño", "bac": "Badui", "bad": "Banda languages", "bae": "Baré", "baf": "Nubaca", "bag": "Tuki", "bah": "Bahamas Creole English", "bai": "Bamileke languages", "baj": "Barakai", "bal": "Baluchi", "ban": "Balinese", "bao": "Waimaha", "bap": "Bantawa", "bar": "Bavarian", "bas": "Basa (Cameroon)", "bat": "Baltic languages", "bau": "Bada (Nigeria)", "bav": "Vengo", "baw": "Bambili-Bambui", "bax": "Bamun", "bay": "Batuley", "bba": "Baatonum", "bbb": "Barai", "bbc": "Batak Toba", "bbd": "Bau", "bbe": "Bangba", "bbf": "Baibai", "bbg": "Barama", "bbh": "Bugan", "bbi": "Barombi", "bbj": "Ghomálá'", "bbk": "Babanki", "bbl": "Bats", "bbm": "Babango", "bbn": "Uneapa", "bbo": "Northern Bobo Madaré; Konabéré", "bbp": "West Central Banda", "bbq": "Bamali", "bbr": "Girawa", "bbs": "Bakpinka", "bbt": "Mburku", "bbu": "Kulung (Nigeria)", "bbv": "Karnai", "bbw": "Baba", "bbx": "Bubia", "bby": "Befang", "bca": "Central Bai", "bcb": "Bainouk-Samik", "bcc": "Southern Balochi", "bcd": "North Babar", "bce": "Bamenyam", "bcf": "Bamu", "bcg": "Baga Pokur", "bch": "Bariai", "bci": "Baoulé", "bcj": "Bardi", "bck": "Bunuba", "bcl": "Central Bikol", "bcm": "Bannoni", 
"bcn": "Bali (Nigeria)", "bco": "Kaluli", "bcp": "Bali (Democratic Republic of Congo)", "bcq": "Bench", "bcr": "Babine", "bcs": "Kohumono", "bct": "Bendi", "bcu": "Awad Bing", "bcv": "Shoo-Minda-Nye", "bcw": "Bana", "bcy": "Bacama", "bcz": "Bainouk-Gunyaamolo", "bda": "Bayot", "bdb": "Basap", "bdc": "Emberá-Baudó", "bdd": "Bunama", "bde": "Bade", "bdf": "Biage", "bdg": "Bonggi", "bdh": "Baka (South Sudan)", "bdi": "Burun", "bdj": "Bai (South Sudan); Bai", "bdk": "Budukh", "bdl": "Indonesian Bajau", "bdm": "Buduma", "bdn": "Baldemu", "bdo": "Morom", "bdp": "Bende", "bdq": "Bahnar", "bdr": "West Coast Bajau", "bds": "Burunge", "bdt": "Bokoto", "bdu": "Oroko", "bdv": "Bodo Parja", "bdw": "Baham", "bdx": "Budong-Budong", "bdy": "Bandjalang", "bdz": "Badeshi", "be": "Belarusian", "bea": "Beaver", "beb": "Bebele", "bec": "Iceve-Maci", "bed": "Bedoanas", "bee": "Byangsi", "bef": "Benabena", "beg": "Belait", "beh": "Biali", "bei": "Bekati'", "bej": "Beja; Bedawiyet", "bek": "Bebeli", "bem": "Bemba (Zambia)", "beo": "Beami", "bep": "Besoa", "beq": "Beembe", "ber": "Berber languages", "bes": "Besme", "bet": "Guiberoua Béte", "beu": "Blagar", "bev": "Daloa Bété", "bew": "Betawi", "bex": "Jur Modo", "bey": "Beli (Papua New Guinea)", "bez": "Bena (Tanzania)", "bfa": "Bari", "bfb": "Pauri Bareli", "bfc": "Panyi Bai; Northern Bai", "bfd": "Bafut", "bfe": "Betaf; Tena", "bff": "Bofi", "bfg": "Busang Kayan", "bfh": "Blafe", "bfi": "British Sign Language", "bfj": "Bafanji", "bfk": "Ban Khor Sign Language", "bfl": "Banda-Ndélé", "bfm": "Mmen", "bfn": "Bunak", "bfo": "Malba Birifor", "bfp": "Beba", "bfq": "Badaga", "bfr": "Bazigar", "bfs": "Southern Bai", "bft": "Balti", "bfu": "Gahri", "bfw": "Bondo", "bfx": "Bantayanon", "bfy": "Bagheli", "bfz": "Mahasu Pahari", "bg": "Bulgarian", "bga": "Gwamhi-Wuri", "bgb": "Bobongko", "bgc": "Haryanvi", "bgd": "Rathwi Bareli", "bge": "Bauria", "bgf": "Bangandu", "bgg": "Bugun", "bgi": "Giangan", "bgj": "Bangolan", "bgk": "Bit; Buxinhua", "bgl": "Bo (Laos)", "bgn": "Western Balochi", "bgo": "Baga Koga", "bgp": "Eastern Balochi", "bgq": "Bagri", "bgr": "Bawm Chin", "bgs": "Tagabawa", "bgt": "Bughotu", "bgu": "Mbongno", "bgv": "Warkay-Bipim", "bgw": "Bhatri", "bgx": "Balkan Gagauz Turkish", "bgy": "Benggoi", "bgz": "Banggai", "bh": "Bihari languages", "bha": "Bharia", "bhb": "Bhili", "bhc": "Biga", "bhd": "Bhadrawahi", "bhe": "Bhaya", "bhf": "Odiai", "bhg": "Binandere", "bhh": "Bukharic", "bhi": "Bhilali", "bhj": "Bahing", "bhl": "Bimin", "bhm": "Bathari", "bhn": "Bohtan Neo-Aramaic", "bho": "Bhojpuri", "bhp": "Bima", "bhq": "Tukang Besi South", "bhr": "Bara Malagasy", "bhs": "Buwal", "bht": "Bhattiyali", "bhu": "Bhunjia", "bhv": "Bahau", "bhw": "Biak", "bhx": "Bhalay", "bhy": "Bhele", "bhz": "Bada (Indonesia)", "bi": "Bislama", "bia": "Badimaya", "bib": "Bissa; Bisa", "bid": "Bidiyo", "bie": "Bepour", "bif": "Biafada", "big": "Biangai", "bik": "Bikol", "bil": "Bile", "bim": "Bimoba", "bin": "Bini; Edo", "bio": "Nai", "bip": "Bila", "biq": "Bipi", "bir": "Bisorio", "bit": "Berinomo", "biu": "Biete", "biv": "Southern Birifor", "biw": "Kol (Cameroon)", "bix": "Bijori", "biy": "Birhor", "biz": "Baloi", "bja": "Budza", "bjb": "Banggarla", "bjc": "Bariji", "bje": "Biao-Jiao Mien", "bjf": "Barzani Jewish Neo-Aramaic", "bjg": "Bidyogo", "bjh": "Bahinemo", "bji": "Burji", "bjj": "Kanauji", "bjk": "Barok", "bjl": "Bulu (Papua New Guinea)", "bjm": "Bajelani", "bjn": "Banjar", "bjo": "Mid-Southern Banda", "bjp": "Fanamaket", "bjr": "Binumarien", "bjs": "Bajan", "bjt": "Balanta-Ganja", 
"bju": "Busuu", "bjv": "Bedjond", "bjw": "Bakwé", "bjx": "Banao Itneg", "bjy": "Bayali", "bjz": "Baruga", "bka": "Kyak", "bkc": "Baka (Cameroon)", "bkd": "Binukid; Talaandig", "bkf": "Beeke", "bkg": "Buraka", "bkh": "Bakoko", "bki": "Baki", "bkj": "Pande", "bkk": "Brokskat", "bkl": "Berik", "bkm": "Kom (Cameroon)", "bkn": "Bukitan", "bko": "Kwa'", "bkp": "Boko (Democratic Republic of Congo)", "bkq": "Bakairí", "bkr": "Bakumpai", "bks": "Northern Sorsoganon", "bkt": "Boloki", "bku": "Buhid", "bkv": "Bekwarra", "bkw": "Bekwel", "bkx": "Baikeno", "bky": "Bokyi", "bkz": "Bungku", "bla": "Siksika", "blb": "Bilua", "blc": "Bella Coola", "bld": "Bolango", "ble": "Balanta-Kentohe", "blf": "Buol", "blh": "Kuwaa", "bli": "Bolia", "blj": "Bolongan", "blk": "Pa'o Karen; Pa'O", "bll": "Biloxi", "blm": "Beli (South Sudan)", "bln": "Southern Catanduanes Bikol", "blo": "Anii", "blp": "Blablanga", "blq": "Baluan-Pam", "blr": "Blang", "bls": "Balaesang", "blt": "Tai Dam", "blv": "Kibala; Bolo", "blw": "Balangao", "blx": "Mag-Indi Ayta", "bly": "Notre", "blz": "Balantak", "bm": "Bambara", "bma": "Lame", "bmb": "Bembe", "bmc": "Biem", "bmd": "Baga Manduri", "bme": "Limassa", "bmf": "Bom-Kim", "bmg": "Bamwe", "bmh": "Kein", "bmi": "Bagirmi", "bmj": "Bote-Majhi", "bmk": "Ghayavi", "bml": "Bomboli", "bmm": "Northern Betsimisaraka Malagasy", "bmn": "Bina (Papua New Guinea)", "bmo": "Bambalang", "bmp": "Bulgebi", "bmq": "Bomu", "bmr": "Muinane", "bms": "Bilma Kanuri", "bmt": "Biao Mon", "bmu": "Somba-Siawari", "bmv": "Bum", "bmw": "Bomwali", "bmx": "Baimak", "bmz": "Baramu", "bn": "Bengali; Bangla", "bna": "Bonerate", "bnb": "Bookan", "bnc": "Bontok", "bnd": "Banda (Indonesia)", "bne": "Bintauna", "bnf": "Masiwang", "bng": "Benga", "bni": "Bangi", "bnj": "Eastern Tawbuid", "bnk": "Bierebo", "bnl": "Boon", "bnm": "Batanga", "bnn": "Bunun", "bno": "Bantoanon", "bnp": "Bola", "bnq": "Bantik", "bnr": "Butmas-Tur", "bns": "Bundeli", "bnt": "Bantu languages", "bnu": "Bentong", "bnv": "Bonerif; Beneraf; Edwas", "bnw": "Bisis", "bnx": "Bangubangu", "bny": "Bintulu", "bnz": "Beezen", "bo": "Tibetan", "boa": "Bora", "bob": "Aweer", "boe": "Mundabli", "bof": "Bolon", "bog": "Bamako Sign Language", "boh": "Boma", "boi": "Barbareño", "boj": "Anjam", "bok": "Bonjo", "bol": "Bole", "bom": "Berom", "bon": "Bine", "boo": "Tiemacèwè Bozo", "bop": "Bonkiman", "boq": "Bogaya", "bor": "Borôro", "bot": "Bongo", "bou": "Bondei", "bov": "Tuwuli", "bow": "Rema", "box": "Buamu", "boy": "Bodo (Central African Republic)", "boz": "Tiéyaxo Bozo", "bpa": "Daakaka", "bpc": "Mbuk", "bpd": "Banda-Banda", "bpe": "Bauni", "bpg": "Bonggo", "bph": "Botlikh", "bpi": "Bagupi", "bpj": "Binji", "bpk": "Orowe; 'Ôrôê", "bpl": "Broome Pearling Lugger Pidgin", "bpm": "Biyom", "bpn": "Dzao Min", "bpo": "Anasi", "bpp": "Kaure", "bpq": "Banda Malay", "bpr": "Koronadal Blaan", "bps": "Sarangani Blaan", "bpt": "Barrow Point", "bpu": "Bongu", "bpv": "Bian Marind", "bpw": "Bo (Papua New Guinea)", "bpx": "Palya Bareli", "bpy": "Bishnupriya", "bpz": "Bilba", "bqa": "Tchumbuli", "bqb": "Bagusa", "bqc": "Boko (Benin); Boo", "bqd": "Bung", "bqf": "Baga Kaloum", "bqg": "Bago-Kusuntu", "bqh": "Baima", "bqi": "Bakhtiari", "bqj": "Bandial", "bqk": "Banda-Mbrès", "bql": "Bilakura", "bqm": "Wumboko", "bqn": "Bulgarian Sign Language", "bqo": "Balo", "bqp": "Busa", "bqq": "Biritai", "bqr": "Burusu", "bqs": "Bosngun", "bqt": "Bamukumbit", "bqu": "Boguru", "bqv": "Koro Wachi; Begbere-Ejar", "bqw": "Buru (Nigeria)", "bqx": "Baangi", "bqy": "Bengkala Sign Language", "bqz": "Bakaka", 
"br": "Breton", "bra": "Braj", "brb": "Brao; Lave", "brc": "Berbice Creole Dutch", "brd": "Baraamu", "brf": "Bira", "brg": "Baure", "brh": "Brahui", "bri": "Mokpwe", "brj": "Bieria", "brk": "Birked", "brl": "Birwa", "brm": "Barambu", "brn": "Boruca", "bro": "Brokkat", "brp": "Barapasi", "brq": "Breri", "brr": "Birao", "brs": "Baras", "brt": "Bitare", "bru": "Eastern Bru", "brv": "Western Bru", "brw": "Bellari", "brx": "Bodo (India)", "bry": "Burui", "brz": "Bilbil", "bs": "Bosnian", "bsa": "Abinomn", "bsb": "Brunei Bisaya", "bsc": "Bassari; Oniyan", "bse": "Wushi", "bsf": "Bauchi", "bsg": "Bashkardi", "bsh": "Kati", "bsi": "Bassossi", "bsj": "Bangwinji", "bsk": "Burushaski", "bsl": "Basa-Gumna", "bsm": "Busami", "bsn": "Barasana-Eduria", "bso": "Buso", "bsp": "Baga Sitemu", "bsq": "Bassa", "bsr": "Bassa-Kontagora", "bss": "Akoose", "bst": "Basketo", "bsu": "Bahonsuai", "bsv": "Baga Sobané", "bsw": "Baiso", "bsx": "Yangkam", "bsy": "Sabah Bisaya", "bta": "Bata", "btc": "Bati (Cameroon)", "btd": "Batak Dairi", "bte": "Gamo-Ningi", "btf": "Birgit", "btg": "Gagnoa Bété", "bth": "Biatah Bidayuh", "bti": "Burate", "btj": "Bacanese Malay", "btk": "Batak languages", "btm": "Batak Mandailing", "btn": "Ratagnon", "bto": "Rinconada Bikol", "btp": "Budibud", "btq": "Batek", "btr": "Baetora", "bts": "Batak Simalungun", "btt": "Bete-Bendi", "btu": "Batu", "btv": "Bateri", "btw": "Butuanon", "btx": "Batak Karo", "bty": "Bobot", "btz": "Batak Alas-Kluet", "bua": "Buriat", "bub": "Bua", "buc": "Bushi", "bud": "Ntcham", "bue": "Beothuk", "buf": "Bushoong", "bug": "Buginese", "buh": "Younuo Bunu", "bui": "Bongili", "buj": "Basa-Gurmana", "buk": "Bugawac", "bum": "Bulu (Cameroon)", "bun": "Sherbro", "buo": "Terei", "bup": "Busoa", "buq": "Brem", "bus": "Bokobaru", "but": "Bungain", "buu": "Budu", "buv": "Bun", "buw": "Bubi", "bux": "Boghom", "buy": "Bullom So", "buz": "Bukwen", "bva": "Barein", "bvb": "Bube", "bvc": "Baelelea", "bvd": "Baeggu", "bve": "Berau Malay", "bvf": "Boor", "bvg": "Bonkeng", "bvh": "Bure", "bvi": "Belanda Viri", "bvj": "Baan", "bvk": "Bukat", "bvl": "Bolivian Sign Language", "bvm": "Bamunka", "bvn": "Buna", "bvo": "Bolgo", "bvp": "Bumang", "bvq": "Birri", "bvr": "Burarra", "bvt": "Bati (Indonesia)", "bvu": "Bukit Malay", "bvv": "Baniva", "bvw": "Boga", "bvx": "Dibole", "bvy": "Baybayanon", "bvz": "Bauzi", "bwa": "Bwatoo", "bwb": "Namosi-Naitasiri-Serua", "bwc": "Bwile", "bwd": "Bwaidoka", "bwe": "Bwe Karen", "bwf": "Boselewa", "bwg": "Barwe", "bwh": "Bishuo", "bwi": "Baniwa", "bwj": "Láá Láá Bwamu", "bwk": "Bauwaki", "bwl": "Bwela", "bwm": "Biwat", "bwn": "Wunai Bunu", "bwo": "Boro (Ethiopia); Borna (Ethiopia)", "bwp": "Mandobo Bawah", "bwq": "Southern Bobo Madaré", "bwr": "Bura-Pabir", "bws": "Bomboma", "bwt": "Bafaw-Balong", "bwu": "Buli (Ghana)", "bww": "Bwa", "bwx": "Bu-Nao Bunu", "bwy": "Cwi Bwamu", "bwz": "Bwisi", "bxa": "Tairaha", "bxb": "Belanda Bor", "bxc": "Molengue", "bxd": "Pela", "bxe": "Birale", "bxf": "Bilur; Minigir", "bxg": "Bangala", "bxh": "Buhutu", "bxi": "Pirlatapa", "bxj": "Bayungu", "bxk": "Bukusu; Lubukusu", "bxl": "Jalkunan", "bxm": "Mongolia Buriat", "bxn": "Burduna", "bxo": "Barikanchi", "bxp": "Bebil", "bxq": "Beele", "bxr": "Russia Buriat", "bxs": "Busam", "bxu": "China Buriat", "bxv": "Berakou", "bxw": "Bankagooma", "bxz": "Binahari", "bya": "Batak", "byb": "Bikya", "byc": "Ubaghara", "byd": "Benyadu'", "bye": "Pouye", "byf": "Bete", "byg": "Baygo", "byh": "Bhujel", "byi": "Buyu", "byj": "Bina (Nigeria)", "byk": "Biao", "byl": "Bayono", "bym": "Bidjara", 
"byn": "Bilin; Blin", "byo": "Biyo", "byp": "Bumaji", "byq": "Basay", "byr": "Baruya; Yipma", "bys": "Burak", "byt": "Berti", "byv": "Medumba", "byw": "Belhariya", "byx": "Qaqet", "byz": "Banaro", "bza": "Bandi", "bzb": "Andio", "bzc": "Southern Betsimisaraka Malagasy", "bzd": "Bribri", "bze": "Jenaama Bozo", "bzf": "Boikin", "bzg": "Babuza", "bzh": "Mapos Buang", "bzi": "Bisu", "bzj": "Belize Kriol English", "bzk": "Nicaragua Creole English", "bzl": "Boano (Sulawesi)", "bzm": "Bolondo", "bzn": "Boano (Maluku)", "bzo": "Bozaba", "bzp": "Kemberano", "bzq": "Buli (Indonesia)", "bzr": "Biri", "bzs": "Brazilian Sign Language", "bzt": "Brithenig", "bzu": "Burmeso", "bzv": "Naami", "bzw": "Basa (Nigeria)", "bzx": "Kɛlɛngaxo Bozo", "bzy": "Obanliku", "bzz": "Evant", "ca": "Catalan; Valencian", "caa": "Chortí", "cab": "Garifuna", "cac": "Chuj", "cad": "Caddo", "cae": "Lehar; Laalaa", "caf": "Southern Carrier", "cag": "Nivaclé", "cah": "Cahuarano", "cai": "Central American Indian languages", "caj": "Chané", "cak": "Kaqchikel; Cakchiquel", "cal": "Carolinian", "cam": "Cemuhî", "can": "Chambri", "cao": "Chácobo", "cap": "Chipaya", "caq": "Car Nicobarese", "car": "Galibi Carib", "cas": "Tsimané", "cau": "Caucasian languages", "cav": "Cavineña", "caw": "Callawalla", "cax": "Chiquitano", "cay": "Cayuga", "caz": "Canichana", "cba": "Chibchan languages", "cbb": "Cabiyarí", "cbc": "Carapana", "cbd": "Carijona", "cbg": "Chimila", "cbi": "Chachi", "cbj": "Ede Cabe", "cbk": "Chavacano", "cbl": "Bualkhaw Chin", "cbn": "Nyahkur", "cbo": "Izora", "cbq": "Tsucuba; Cuba", "cbr": "Cashibo-Cacataibo", "cbs": "Cashinahua", "cbt": "Chayahuita", "cbu": "Candoshi-Shapra", "cbv": "Cacua", "cbw": "Kinabalian", "cby": "Carabayo", "ccc": "Chamicuro", "ccd": "Cafundo Creole", "cce": "Chopi", "ccg": "Samba Daka", "cch": "Atsam", "ccj": "Kasanga", "ccl": "Cutchi-Swahili", "ccm": "Malaccan Creole Malay", "ccn": "North Caucasian languages", "cco": "Comaltepec Chinantec", "ccp": "Chakma", "ccr": "Cacaopera", "ccs": "South Caucasian languages", "cda": "Choni", "cdc": "Chadic languages", "cdd": "Caddoan languages", "cde": "Chenchu", "cdf": "Chiru", "cdh": "Chambeali", "cdi": "Chodri", "cdj": "Churahi", "cdm": "Chepang", "cdn": "Chaudangsi", "cdo": "Min Dong Chinese", "cdr": "Cinda-Regi-Tiyal", "cds": "Chadian Sign Language", "cdy": "Chadong", "cdz": "Koda", "ce": "Chechen", "cea": "Lower Chehalis", "ceb": "Cebuano", "ceg": "Chamacoco", "cek": "Eastern Khumi Chin", "cel": "Celtic languages", "cen": "Cen", "cet": "Centúúm", "cey": "Ekai Chin", "cfa": "Dijim-Bwilim", "cfd": "Cara", "cfg": "Como Karim", "cfm": "Falam Chin", "cga": "Changriwa", "cgc": "Kagayanen", "cgg": "Chiga", "cgk": "Chocangacakha", "ch": "Chamorro", "chb": "Chibcha", "chc": "Catawba", "chd": "Highland Oaxaca Chontal", "chf": "Tabasco Chontal", "chg": "Chagatai", "chh": "Chinook", "chj": "Ojitlán Chinantec", "chk": "Chuukese", "chl": "Cahuilla", "chm": "Mari (Russia)", "chn": "Chinook jargon", "cho": "Choctaw", "chp": "Chipewyan; Dene Suline", "chq": "Quiotepec Chinantec", "chr": "Cherokee", "cht": "Cholón", "chw": "Chuwabu", "chx": "Chantyal", "chy": "Cheyenne", "chz": "Ozumacín Chinantec", "cia": "Cia-Cia", "cib": "Ci Gbe", "cic": "Chickasaw", "cid": "Chimariko", "cie": "Cineni", "cih": "Chinali", "cik": "Chitkuli Kinnauri", "cim": "Cimbrian", "cin": "Cinta Larga", "cip": "Chiapanec", "cir": "Tiri; Haméa; Méa", "ciw": "Chippewa", "ciy": "Chaima", "cja": "Western Cham", "cje": "Chru", "cjh": "Upper Chehalis", "cji": "Chamalal", "cjk": "Chokwe", "cjm": "Eastern 
Cham", "cjn": "Chenapian", "cjo": "Ashéninka Pajonal", "cjp": "Cabécar", "cjs": "Shor", "cjv": "Chuave", "cjy": "Jinyu Chinese", "ckb": "Central Kurdish", "ckh": "Chak", "ckl": "Cibak", "ckm": "Chakavian", "ckn": "Kaang Chin", "cko": "Anufo", "ckq": "Kajakse", "ckr": "Kairak", "cks": "Tayo", "ckt": "Chukot", "cku": "Koasati", "ckv": "Kavalan", "ckx": "Caka", "cky": "Cakfem-Mushere", "ckz": "Cakchiquel-Quiché Mixed Language", "cla": "Ron", "clc": "Chilcotin", "cld": "Chaldean Neo-Aramaic", "cle": "Lealao Chinantec", "clh": "Chilisso", "cli": "Chakali", "clj": "Laitu Chin", "clk": "Idu-Mishmi", "cll": "Chala", "clm": "Clallam", "clo": "Lowland Oaxaca Chontal", "clt": "Lautu Chin", "clu": "Caluyanun", "clw": "Chulym", "cly": "Eastern Highland Chatino", "cma": "Maa", "cmc": "Chamic languages", "cme": "Cerma", "cmg": "Classical Mongolian", "cmi": "Emberá-Chamí", "cml": "Campalagian", "cmm": "Michigamea", "cmn": "Mandarin Chinese", "cmo": "Central Mnong", "cmr": "Mro-Khimi Chin", "cms": "Messapic", "cmt": "Camtho", "cna": "Changthang", "cnb": "Chinbon Chin", "cnc": "Côông", "cng": "Northern Qiang", "cnh": "Hakha Chin; Haka Chin", "cni": "Asháninka", "cnk": "Khumi Chin", "cnl": "Lalana Chinantec", "cno": "Con", "cnp": "Northern Ping Chinese; Northern Pinghua", "cnq": "Chung", "cnr": "Montenegrin", "cns": "Central Asmat", "cnt": "Tepetotutla Chinantec", "cnu": "Chenoua", "cnw": "Ngawn Chin", "cnx": "Middle Cornish", "co": "Corsican", "coa": "Cocos Islands Malay", "cob": "Chicomuceltec", "coc": "Cocopa", "cod": "Cocama-Cocamilla", "coe": "Koreguaje", "cof": "Colorado", "cog": "Chong", "coh": "Chonyi-Dzihana-Kauma; Chichonyi-Chidzihana-Chikauma", "coj": "Cochimi", "cok": "Santa Teresa Cora", "col": "Columbia-Wenatchi", "com": "Comanche", "con": "Cofán", "coo": "Comox", "cop": "Coptic", "coq": "Coquille", "cot": "Caquinte", "cou": "Wamey", "cov": "Cao Miao", "cow": "Cowlitz", "cox": "Nanti", "coz": "Chochotec", "cpa": "Palantla Chinantec", "cpb": "Ucayali-Yurúa Ashéninka", "cpc": "Ajyíninka Apurucayali", "cpe": "English-based creoles and pidgins", "cpf": "French-based creoles and pidgins", "cpg": "Cappadocian Greek", "cpi": "Chinese Pidgin English", "cpn": "Cherepon", "cpo": "Kpeego", "cpp": "Portuguese-based creoles and pidgins", "cps": "Capiznon", "cpu": "Pichis Ashéninka", "cpx": "Pu-Xian Chinese", "cpy": "South Ucayali Ashéninka", "cqd": "Chuanqiandian Cluster Miao", "cr": "Cree", "cra": "Chara", "crb": "Island Carib", "crc": "Lonwolwol", "crd": "Coeur d'Alene", "crf": "Caramanta", "crg": "Michif", "crh": "Crimean Tatar; Crimean Turkish", "cri": "Sãotomense", "crj": "Southern East Cree", "crk": "Plains Cree", "crl": "Northern East Cree", "crm": "Moose Cree", "crn": "El Nayar Cora", "cro": "Crow", "crp": "Creoles and pidgins", "crq": "Iyo'wujwa Chorote", "crr": "Carolina Algonquian", "crs": "Seselwa Creole French", "crt": "Iyojwa'ja Chorote", "crv": "Chaura", "crw": "Chrau", "crx": "Carrier", "cry": "Cori", "crz": "Cruzeño", "cs": "Czech", "csa": "Chiltepec Chinantec", "csb": "Kashubian", "csc": "Catalan Sign Language; Lengua de señas catalana; Llengua de Signes Catalana", "csd": "Chiangmai Sign Language", "cse": "Czech Sign Language", "csf": "Cuba Sign Language", "csg": "Chilean Sign Language", "csh": "Asho Chin", "csi": "Coast Miwok", "csj": "Songlai Chin", "csk": "Jola-Kasa", "csl": "Chinese Sign Language", "csm": "Central Sierra Miwok", "csn": "Colombian Sign Language", "cso": "Sochiapam Chinantec; Sochiapan Chinantec", "csp": "Southern Ping Chinese; Southern Pinghua", "csq": "Croatia Sign 
Language", "csr": "Costa Rican Sign Language", "css": "Southern Ohlone", "cst": "Northern Ohlone", "csu": "Central Sudanic languages", "csv": "Sumtu Chin", "csw": "Swampy Cree", "csx": "Cambodian Sign Language", "csy": "Siyin Chin", "csz": "Coos", "cta": "Tataltepec Chatino", "ctc": "Chetco", "ctd": "Tedim Chin", "cte": "Tepinapa Chinantec", "ctg": "Chittagonian", "cth": "Thaiphum Chin", "ctl": "Tlacoatzintepec Chinantec", "ctm": "Chitimacha", "ctn": "Chhintange", "cto": "Emberá-Catío", "ctp": "Western Highland Chatino", "cts": "Northern Catanduanes Bikol", "ctt": "Wayanad Chetti", "ctu": "Chol", "cty": "Moundadan Chetty", "ctz": "Zacatepec Chatino", "cu": "Church Slavic; Church Slavonic; Old Bulgarian; Old Church Slavonic; Old Slavonic", "cua": "Cua", "cub": "Cubeo", "cuc": "Usila Chinantec", "cuh": "Chuka; Gichuka", "cui": "Cuiba", "cuj": "Mashco Piro", "cuk": "San Blas Kuna", "cul": "Culina; Kulina", "cuo": "Cumanagoto", "cup": "Cupeño", "cuq": "Cun", "cur": "Chhulung", "cus": "Cushitic languages", "cut": "Teutila Cuicatec", "cuu": "Tai Ya", "cuv": "Cuvok", "cuw": "Chukwa", "cux": "Tepeuxila Cuicatec", "cuy": "Cuitlatec", "cv": "Chuvash", "cvg": "Chug", "cvn": "Valle Nacional Chinantec", "cwa": "Kabwa", "cwb": "Maindo", "cwd": "Woods Cree", "cwe": "Kwere", "cwg": "Chewong; Cheq Wong", "cwt": "Kuwaataay", "cy": "Welsh", "cya": "Nopala Chatino", "cyb": "Cayubaba", "cyo": "Cuyonon", "czh": "Huizhou Chinese", "czk": "Knaanic", "czn": "Zenzontepec Chatino", "czo": "Min Zhong Chinese", "czt": "Zotung Chin", "da": "Danish", "daa": "Dangaléat", "dac": "Dambi", "dad": "Marik", "dae": "Duupa", "dag": "Dagbani", "dah": "Gwahatike", "dai": "Day", "daj": "Dar Fur Daju", "dak": "Dakota", "dal": "Dahalo", "dam": "Damakawa", "dao": "Daai Chin", "daq": "Dandami Maria", "dar": "Dargwa", "das": "Daho-Doo", "dau": "Dar Sila Daju", "dav": "Taita; Dawida", "daw": "Davawenyo", "dax": "Dayi", "day": "Land Dayak languages", "daz": "Dao", "dba": "Bangime", "dbb": "Deno", "dbd": "Dadiya", "dbe": "Dabe", "dbf": "Edopi", "dbg": "Dogul Dom Dogon", "dbi": "Doka", "dbj": "Ida'an", "dbl": "Dyirbal", "dbm": "Duguri", "dbn": "Duriankere", "dbo": "Dulbu", "dbp": "Duwai", "dbq": "Daba", "dbr": "Dabarre", "dbt": "Ben Tey Dogon", "dbu": "Bondum Dom Dogon", "dbv": "Dungu", "dbw": "Bankan Tey Dogon", "dby": "Dibiyaso", "dcc": "Deccan", "dcr": "Negerhollands", "dda": "Dadi Dadi", "ddd": "Dongotono", "dde": "Doondo", "ddg": "Fataluku", "ddi": "West Goodenough", "ddj": "Jaru", "ddn": "Dendi (Benin)", "ddo": "Dido", "ddr": "Dhudhuroa", "dds": "Donno So Dogon", "ddw": "Dawera-Daweloor", "de": "German", "dec": "Dagik", "ded": "Dedua", "dee": "Dewoin", "def": "Dezfuli", "deg": "Degema", "deh": "Dehwari", "dei": "Demisa", "dek": "Dek", "del": "Delaware", "dem": "Dem", "den": "Slave (Athapascan)", "dep": "Pidgin Delaware", "deq": "Dendi (Central African Republic)", "der": "Deori", "des": "Desano", "dev": "Domung", "dez": "Dengese", "dga": "Southern Dagaare", "dgb": "Bunoge Dogon", "dgc": "Casiguran Dumagat Agta", "dgd": "Dagaari Dioula", "dge": "Degenan", "dgg": "Doga", "dgh": "Dghwede", "dgi": "Northern Dagara", "dgk": "Dagba", "dgl": "Andaandi; Dongolawi", "dgn": "Dagoman", "dgo": "Dogri (individual language)", "dgr": "Dogrib; Tłı̨chǫ", "dgs": "Dogoso", "dgt": "Ndra'ngith", "dgw": "Daungwurrung", "dgx": "Doghoro", "dgz": "Daga", "dhd": "Dhundari", "dhg": "Dhangu-Djangu; Dhangu; Djangu", "dhi": "Dhimal", "dhl": "Dhalandji", "dhm": "Zemba", "dhn": "Dhanki", "dho": "Dhodia", "dhr": "Dhargari", "dhs": "Dhaiso", "dhu": "Dhurga", "dhv": 
"Dehu; Drehu", "dhw": "Dhanwar (Nepal)", "dhx": "Dhungaloo", "dia": "Dia", "dib": "South Central Dinka", "dic": "Lakota Dida", "did": "Didinga", "dif": "Dieri; Diyari", "dig": "Digo; Chidigo", "dih": "Kumiai", "dii": "Dimbong", "dij": "Dai", "dik": "Southwestern Dinka", "dil": "Dilling", "dim": "Dime", "din": "Dinka", "dio": "Dibo", "dip": "Northeastern Dinka", "diq": "Dimli (individual language)", "dir": "Dirim", "dis": "Dimasa", "diu": "Diriku", "diw": "Northwestern Dinka", "dix": "Dixon Reef", "diy": "Diuwe", "diz": "Ding", "dja": "Djadjawurrung", "djb": "Djinba", "djc": "Dar Daju Daju", "djd": "Djamindjung; Ngaliwurru", "dje": "Zarma", "djf": "Djangun", "dji": "Djinang", "djj": "Djeebbana", "djk": "Eastern Maroon Creole; Businenge Tongo; Nenge", "djm": "Jamsay Dogon", "djn": "Jawoyn; Djauan", "djo": "Jangkang", "djr": "Djambarrpuyngu", "dju": "Kapriman", "djw": "Djawi", "dka": "Dakpakha", "dkg": "Kadung", "dkk": "Dakka", "dkr": "Kuijau", "dks": "Southeastern Dinka", "dkx": "Mazagway", "dlg": "Dolgan", "dlk": "Dahalik", "dlm": "Dalmatian", "dln": "Darlong", "dma": "Duma", "dmb": "Mombo Dogon", "dmc": "Gavak", "dmd": "Madhi Madhi", "dme": "Dugwor", "dmf": "Medefaidrin", "dmg": "Upper Kinabatangan", "dmk": "Domaaki", "dml": "Dameli", "dmm": "Dama", "dmn": "Mande languages", "dmo": "Kemedzung", "dmr": "East Damar", "dms": "Dampelas", "dmu": "Dubu; Tebi", "dmv": "Dumpas", "dmw": "Mudburra", "dmx": "Dema", "dmy": "Demta; Sowari", "dna": "Upper Grand Valley Dani", "dnd": "Daonda", "dne": "Ndendeule", "dng": "Dungan", "dni": "Lower Grand Valley Dani", "dnj": "Dan", "dnk": "Dengka", "dnn": "Dzùùngoo", "dno": "Ndrulo; Northern Lendu", "dnr": "Danaru", "dnt": "Mid Grand Valley Dani", "dnu": "Danau", "dnv": "Danu", "dnw": "Western Dani", "dny": "Dení", "doa": "Dom", "dob": "Dobu", "doc": "Northern Dong", "doe": "Doe", "dof": "Domu", "doh": "Dong", "doi": "Dogri (macrolanguage)", "dok": "Dondo", "dol": "Doso", "don": "Toura (Papua New Guinea)", "doo": "Dongo", "dop": "Lukpa", "doq": "Dominican Sign Language", "dor": "Dori'o", "dos": "Dogosé", "dot": "Dass", "dov": "Dombe", "dow": "Doyayo", "dox": "Bussa", "doy": "Dompo", "doz": "Dorze", "dpp": "Papar", "dra": "Dravidian languages", "drb": "Dair", "drc": "Minderico", "drd": "Darmiya", "dre": "Dolpo", "drg": "Rungus", "dri": "C'Lela", "drl": "Paakantyi", "drn": "West Damar", "dro": "Daro-Matu Melanau", "drq": "Dura", "drs": "Gedeo", "drt": "Drents", "dru": "Rukai", "dry": "Darai", "dsb": "Lower Sorbian", "dse": "Dutch Sign Language", "dsh": "Daasanach", "dsi": "Disa", "dsl": "Danish Sign Language", "dsn": "Dusner", "dso": "Desiya", "dsq": "Tadaksahak", "dsz": "Mardin Sign Language", "dta": "Daur", "dtb": "Labuk-Kinabatangan Kadazan", "dtd": "Ditidaht", "dth": "Adithinngithigh", "dti": "Ana Tinga Dogon", "dtk": "Tene Kan Dogon", "dtm": "Tomo Kan Dogon", "dtn": "Daatsʼíin", "dto": "Tommo So Dogon", "dtp": "Kadazan Dusun; Central Dusun", "dtr": "Lotud", "dts": "Toro So Dogon", "dtt": "Toro Tegu Dogon", "dtu": "Tebul Ure Dogon", "dty": "Dotyali", "dua": "Duala", "dub": "Dubli", "duc": "Duna", "due": "Umiray Dumaget Agta", "duf": "Dumbea; Drubea", "dug": "Duruma; Chiduruma", "duh": "Dungra Bhil", "dui": "Dumun", "duk": "Uyajitaya", "dul": "Alabat Island Agta", "dum": "Middle Dutch (ca. 
1050-1350)", "dun": "Dusun Deyah", "duo": "Dupaninan Agta", "dup": "Duano", "duq": "Dusun Malang", "dur": "Dii", "dus": "Dumi", "duu": "Drung", "duv": "Duvle", "duw": "Dusun Witu", "dux": "Duungooma", "duy": "Dicamay Agta", "duz": "Duli-Gey", "dv": "Dhivehi; Divehi; Maldivian", "dva": "Duau", "dwa": "Diri", "dwk": "Dawik Kui", "dwr": "Dawro", "dws": "Dutton World Speedwords", "dwu": "Dhuwal", "dww": "Dawawa", "dwy": "Dhuwaya", "dwz": "Dewas Rai", "dya": "Dyan", "dyb": "Dyaberdyaber", "dyd": "Dyugun", "dyg": "Villa Viciosa Agta", "dyi": "Djimini Senoufo", "dym": "Yanda Dom Dogon", "dyn": "Dyangadi; Dhanggatti", "dyo": "Jola-Fonyi", "dyu": "Dyula", "dyy": "Djabugay; Dyaabugay", "dz": "Dzongkha", "dza": "Tunzu", "dze": "Djiwarli", "dzg": "Dazaga", "dzl": "Dzalakha", "dzn": "Dzando", "eaa": "Karenggapa", "ebc": "Beginci", "ebg": "Ebughu", "ebk": "Eastern Bontok", "ebo": "Teke-Ebo", "ebr": "Ebrié", "ebu": "Embu; Kiembu", "ecr": "Eteocretan", "ecs": "Ecuadorian Sign Language", "ecy": "Eteocypriot", "ee": "Ewe", "eee": "E", "efa": "Efai", "efe": "Efe", "efi": "Efik", "ega": "Ega", "egl": "Emilian", "egm": "Benamanga", "ego": "Eggon", "egx": "Egyptian languages", "egy": "Egyptian (Ancient)", "ehs": "Miyakubo Sign Language", "ehu": "Ehueun", "eip": "Eipomek", "eit": "Eitiep", "eiv": "Askopan", "eja": "Ejamat", "eka": "Ekajuk", "eke": "Ekit", "ekg": "Ekari", "eki": "Eki", "ekk": "Standard Estonian", "ekl": "Kol (Bangladesh); Kol", "ekm": "Elip", "eko": "Koti", "ekp": "Ekpeye", "ekr": "Yace", "eky": "Eastern Kayah", "el": "Modern Greek (1453-)", "ele": "Elepi", "elh": "El Hugeirat", "eli": "Nding", "elk": "Elkei", "elm": "Eleme", "elo": "El Molo", "elu": "Elu", "elx": "Elamite", "ema": "Emai-Iuleha-Ora", "emb": "Embaloh", "eme": "Emerillon", "emg": "Eastern Meohang", "emi": "Mussau-Emira", "emk": "Eastern Maninkakan", "emm": "Mamulique", "emn": "Eman", "emp": "Northern Emberá", "emq": "Eastern Minyag", "ems": "Pacific Gulf Yupik", "emu": "Eastern Muria", "emw": "Emplawas", "emx": "Erromintxela", "emy": "Epigraphic Mayan", "emz": "Mbessa", "en": "English", "ena": "Apali", "enb": "Markweeta", "enc": "En", "end": "Ende", "enf": "Forest Enets", "enh": "Tundra Enets", "enl": "Enlhet", "enm": "Middle English (1100-1500)", "enn": "Engenni", "eno": "Enggano", "enq": "Enga", "enr": "Emumu; Emem", "enu": "Enu", "env": "Enwan (Edo State)", "enw": "Enwan (Akwa Ibom State)", "enx": "Enxet", "eo": "Esperanto", "eot": "Beti (Côte d'Ivoire)", "epi": "Epie", "era": "Eravallan", "erg": "Sie", "erh": "Eruwa", "eri": "Ogea", "erk": "South Efate", "ero": "Horpa", "err": "Erre", "ers": "Ersu", "ert": "Eritai", "erw": "Erokwanas", "es": "Spanish; Castilian", "ese": "Ese Ejja", "esg": "Aheri Gondi", "esh": "Eshtehardi", "esi": "North Alaskan Inupiatun", "esk": "Northwest Alaska Inupiatun", "esl": "Egypt Sign Language", "esm": "Esuma", "esn": "Salvadoran Sign Language", "eso": "Estonian Sign Language", "esq": "Esselen", "ess": "Central Siberian Yupik", "esu": "Central Yupik", "esx": "Eskimo-Aleut languages", "esy": "Eskayan", "et": "Estonian", "etb": "Etebi", "etc": "Etchemin", "eth": "Ethiopian Sign Language", "etn": "Eton (Vanuatu)", "eto": "Eton (Cameroon)", "etr": "Edolo", "ets": "Yekhee", "ett": "Etruscan", "etu": "Ejagham", "etx": "Eten", "etz": "Semimi", "eu": "Basque", "euq": "Basque (family)", "eve": "Even", "evh": "Uvbie", "evn": "Evenki", "ewo": "Ewondo", "ext": "Extremaduran", "eya": "Eyak", "eyo": "Keiyo", "eza": "Ezaa", "eze": "Uzekwe", "fa": "Persian", "faa": "Fasu", "fab": "Fa d'Ambu", "fad": "Wagi", "faf": 
"Fagani", "fag": "Finongan", "fah": "Baissa Fali", "fai": "Faiwol", "faj": "Faita", "fak": "Fang (Cameroon)", "fal": "South Fali", "fam": "Fam", "fan": "Fang (Equatorial Guinea)", "fap": "Paloor", "far": "Fataleka", "fat": "Fanti", "fau": "Fayu", "fax": "Fala", "fay": "Southwestern Fars", "faz": "Northwestern Fars", "fbl": "West Albay Bikol", "fcs": "Quebec Sign Language", "fer": "Feroge", "ff": "Fulah", "ffi": "Foia Foia", "ffm": "Maasina Fulfulde", "fgr": "Fongoro", "fi": "Finnish", "fia": "Nobiin", "fie": "Fyer", "fif": "Faifi", "fil": "Filipino; Pilipino", "fip": "Fipa", "fir": "Firan", "fit": "Tornedalen Finnish; Meänkieli", "fiu": "Finno-Ugrian languages", "fiw": "Fiwaga", "fj": "Fijian", "fkk": "Kirya-Konzəl", "fkv": "Kven Finnish", "fla": "Kalispel-Pend d'Oreille", "flh": "Foau", "fli": "Fali", "fll": "North Fali", "fln": "Flinders Island", "flr": "Fuliiru", "fly": "Flaaitaal; Tsotsitaal", "fmp": "Fe'fe'", "fmu": "Far Western Muria", "fnb": "Fanbak", "fng": "Fanagalo", "fni": "Fania", "fo": "Faroese", "fod": "Foodo", "foi": "Foi", "fom": "Foma", "fon": "Fon", "for": "Fore", "fos": "Siraya", "fox": "Formosan languages", "fpe": "Fernando Po Creole English", "fqs": "Fas", "fr": "French", "frc": "Cajun French", "frd": "Fordata", "frk": "Frankish", "frm": "Middle French (ca. 1400-1600)", "fro": "Old French (842-ca. 1400)", "frp": "Arpitan; Francoprovençal", "frq": "Forak", "frr": "Northern Frisian", "frs": "Eastern Frisian", "frt": "Fortsenal", "fse": "Finnish Sign Language", "fsl": "French Sign Language", "fss": "Finland-Swedish Sign Language; finlandssvenskt teckenspråk; suomenruotsalainen viittomakieli", "fub": "Adamawa Fulfulde", "fuc": "Pulaar", "fud": "East Futuna", "fue": "Borgu Fulfulde", "fuf": "Pular", "fuh": "Western Niger Fulfulde", "fui": "Bagirmi Fulfulde", "fuj": "Ko", "fum": "Fum", "fun": "Fulniô", "fuq": "Central-Eastern Niger Fulfulde", "fur": "Friulian", "fut": "Futuna-Aniwa", "fuu": "Furu", "fuv": "Nigerian Fulfulde", "fuy": "Fuyug", "fvr": "Fur", "fwa": "Fwâi", "fwe": "Fwe", "fy": "Western Frisian", "ga": "Irish", "gaa": "Ga", "gab": "Gabri", "gac": "Mixed Great Andamanese", "gad": "Gaddang", "gae": "Guarequena", "gaf": "Gende", "gag": "Gagauz", "gah": "Alekano", "gai": "Borei", "gaj": "Gadsup", "gak": "Gamkonora", "gal": "Galolen", "gam": "Kandawo", "gan": "Gan Chinese", "gao": "Gants", "gap": "Gal", "gaq": "Gata'", "gar": "Galeya", "gas": "Adiwasi Garasia", "gat": "Kenati", "gau": "Mudhili Gadaba", "gaw": "Nobonob", "gax": "Borana-Arsi-Guji Oromo", "gay": "Gayo", "gaz": "West Central Oromo", "gba": "Gbaya (Central African Republic)", "gbb": "Kaytetye", "gbd": "Karajarri", "gbe": "Niksek", "gbf": "Gaikundi", "gbg": "Gbanziri", "gbh": "Defi Gbe", "gbi": "Galela", "gbj": "Bodo Gadaba", "gbk": "Gaddi", "gbl": "Gamit", "gbm": "Garhwali", "gbn": "Mo'da", "gbo": "Northern Grebo", "gbp": "Gbaya-Bossangoa", "gbq": "Gbaya-Bozoum", "gbr": "Gbagyi", "gbs": "Gbesi Gbe", "gbu": "Gagadu", "gbv": "Gbanu", "gbw": "Gabi-Gabi", "gbx": "Eastern Xwla Gbe", "gby": "Gbari", "gbz": "Zoroastrian Dari", "gcc": "Mali", "gcd": "Ganggalida", "gce": "Galice", "gcf": "Guadeloupean Creole French", "gcl": "Grenadian Creole English", "gcn": "Gaina", "gcr": "Guianese Creole French", "gct": "Colonia Tovar German", "gd": "Scottish Gaelic; Gaelic", "gda": "Gade Lohar", "gdb": "Pottangi Ollar Gadaba", "gdc": "Gugu Badhun", "gdd": "Gedaged", "gde": "Gude", "gdf": "Guduf-Gava", "gdg": "Ga'dang", "gdh": "Gadjerawang; Gajirrabeng", "gdi": "Gundi", "gdj": "Gurdjar", "gdk": "Gadang", "gdl": "Dirasha", "gdm": 
"Laal", "gdn": "Umanakaina", "gdo": "Ghodoberi", "gdq": "Mehri", "gdr": "Wipi", "gds": "Ghandruk Sign Language", "gdt": "Kungardutyi", "gdu": "Gudu", "gdx": "Godwari", "gea": "Geruma", "geb": "Kire", "gec": "Gboloo Grebo", "ged": "Gade", "gef": "Gerai", "geg": "Gengle", "geh": "Hutterite German; Hutterisch", "gei": "Gebe", "gej": "Gen", "gek": "Ywom", "gel": "ut-Ma'in", "gem": "Germanic languages", "geq": "Geme", "ges": "Geser-Gorom", "gev": "Eviya", "gew": "Gera", "gex": "Garre", "gey": "Enya", "gez": "Geez", "gfk": "Patpatar", "gft": "Gafat", "gga": "Gao", "ggb": "Gbii", "ggd": "Gugadj", "gge": "Gurr-goni", "ggg": "Gurgula", "ggk": "Kungarakany", "ggl": "Ganglau", "ggt": "Gitua", "ggu": "Gagu; Gban", "ggw": "Gogodala", "gha": "Ghadamès", "ghc": "Hiberno-Scottish Gaelic", "ghe": "Southern Ghale", "ghh": "Northern Ghale", "ghk": "Geko Karen", "ghl": "Ghulfan", "ghn": "Ghanongga", "gho": "Ghomara", "ghr": "Ghera", "ghs": "Guhu-Samane", "ght": "Kuke; Kutang Ghale", "gia": "Kija", "gib": "Gibanawa", "gic": "Gail", "gid": "Gidar", "gie": "Gaɓogbo; Guébie", "gig": "Goaria", "gih": "Githabul", "gii": "Girirra", "gil": "Gilbertese", "gim": "Gimi (Eastern Highlands)", "gin": "Hinukh", "gip": "Gimi (West New Britain)", "giq": "Green Gelao", "gir": "Red Gelao", "gis": "North Giziga", "git": "Gitxsan", "giu": "Mulao", "giw": "White Gelao", "gix": "Gilima", "giy": "Giyug", "giz": "South Giziga", "gjk": "Kachi Koli", "gjm": "Gunditjmara", "gjn": "Gonja", "gjr": "Gurindji Kriol", "gju": "Gujari", "gka": "Guya", "gkd": "Magɨ (Madang Province)", "gke": "Ndai", "gkn": "Gokana", "gko": "Kok-Nar", "gkp": "Guinea Kpelle", "gku": "ǂUngkue", "gl": "Galician", "glb": "Belning", "glc": "Bon Gula", "gld": "Nanai", "glh": "Northwest Pashai; Northwest Pashayi", "glj": "Gula Iro", "glk": "Gilaki", "gll": "Garlali", "glo": "Galambu", "glr": "Glaro-Twabo", "glu": "Gula (Chad)", "glw": "Glavda", "gly": "Gule", "gma": "Gambera", "gmb": "Gula'alaa", "gmd": "Mághdì", "gme": "East Germanic languages", "gmg": "Magɨyi", "gmh": "Middle High German (ca. 1050-1500)", "gml": "Middle Low German", "gmm": "Gbaya-Mbodomo", "gmn": "Gimnime", "gmq": "North Germanic languages", "gmr": "Mirning; Mirniny", "gmu": "Gumalu", "gmv": "Gamo", "gmw": "West Germanic languages", "gmx": "Magoma", "gmy": "Mycenaean Greek", "gmz": "Mgbolizhia", "gn": "Guarani", "gna": "Kaansa", "gnb": "Gangte", "gnc": "Guanche", "gnd": "Zulgo-Gemzek", "gne": "Ganang", "gng": "Ngangam", "gnh": "Lere", "gni": "Gooniyandi", "gnj": "Ngen", "gnk": "ǁGana", "gnl": "Gangulu", "gnm": "Ginuman", "gnn": "Gumatj", "gno": "Northern Gondi", "gnq": "Gana", "gnr": "Gureng Gureng", "gnt": "Guntai", "gnu": "Gnau", "gnw": "Western Bolivian Guaraní", "gnz": "Ganzi", "goa": "Guro", "gob": "Playero", "goc": "Gorakor", "god": "Godié", "goe": "Gongduk", "gof": "Gofa", "gog": "Gogo", "goh": "Old High German (ca. 
750-1050)", "goi": "Gobasi", "goj": "Gowlan", "gok": "Gowli", "gol": "Gola", "gom": "Goan Konkani", "gon": "Gondi", "goo": "Gone Dau", "gop": "Yeretuar", "goq": "Gorap", "gor": "Gorontalo", "gos": "Gronings", "got": "Gothic", "gou": "Gavar", "gov": "Goo", "gow": "Gorowa", "gox": "Gobu", "goy": "Goundo", "goz": "Gozarkhani", "gpa": "Gupa-Abawa", "gpe": "Ghanaian Pidgin English", "gpn": "Taiap", "gqa": "Ga'anda", "gqi": "Guiqiong", "gqn": "Guana (Brazil)", "gqr": "Gor", "gqu": "Qau", "gra": "Rajput Garasia", "grb": "Grebo", "grc": "Ancient Greek (to 1453)", "grd": "Guruntum-Mbaaru", "grg": "Madi", "grh": "Gbiri-Niragu", "gri": "Ghari", "grj": "Southern Grebo", "grk": "Greek languages", "grm": "Kota Marudu Talantang", "gro": "Groma", "grq": "Gorovu", "grr": "Taznatit", "grs": "Gresi", "grt": "Garo", "gru": "Kistane", "grv": "Central Grebo", "grw": "Gweda", "grx": "Guriaso", "gry": "Barclayville Grebo", "grz": "Guramalum", "gse": "Ghanaian Sign Language", "gsg": "German Sign Language", "gsl": "Gusilay", "gsm": "Guatemalan Sign Language", "gsn": "Nema; Gusan", "gso": "Southwest Gbaya", "gsp": "Wasembo", "gss": "Greek Sign Language", "gsw": "Swiss German; Alemannic; Alsatian", "gta": "Guató", "gtu": "Aghu-Tharnggala", "gu": "Gujarati", "gua": "Shiki", "gub": "Guajajára", "guc": "Wayuu", "gud": "Yocoboué Dida", "gue": "Gurindji", "guf": "Gupapuyngu", "gug": "Paraguayan Guaraní", "guh": "Guahibo", "gui": "Eastern Bolivian Guaraní", "guk": "Gumuz", "gul": "Sea Island Creole English", "gum": "Guambiano", "gun": "Mbyá Guaraní", "guo": "Guayabero", "gup": "Gunwinggu", "guq": "Aché", "gur": "Farefare", "gus": "Guinean Sign Language", "gut": "Maléku Jaíka", "guu": "Yanomamö", "guw": "Gun", "gux": "Gourmanchéma", "guz": "Gusii; Ekegusii", "gv": "Manx", "gva": "Guana (Paraguay)", "gvc": "Guanano", "gve": "Duwet", "gvf": "Golin", "gvj": "Guajá", "gvl": "Gulay", "gvm": "Gurmana", "gvn": "Kuku-Yalanji", "gvo": "Gavião Do Jiparaná", "gvp": "Pará Gavião", "gvr": "Gurung", "gvs": "Gumawana", "gvy": "Guyani", "gwa": "Mbato", "gwb": "Gwa", "gwc": "Gawri; Kalami", "gwd": "Gawwada", "gwe": "Gweno", "gwf": "Gowro", "gwg": "Moo", "gwi": "Gwichʼin", "gwj": "ǀGwi", "gwm": "Awngthim", "gwn": "Gwandara", "gwr": "Gwere", "gwt": "Gawar-Bati", "gwu": "Guwamu", "gww": "Kwini", "gwx": "Gua", "gxx": "Wè Southern", "gya": "Northwest Gbaya", "gyb": "Garus", "gyd": "Kayardild", "gye": "Gyem", "gyf": "Gungabula", "gyg": "Gbayi", "gyi": "Gyele", "gyl": "Gayil", "gym": "Ngäbere", "gyn": "Guyanese Creole English", "gyo": "Gyalsumdo", "gyr": "Guarayu", "gyy": "Gunya", "gyz": "Geji; Gyaazi", "gza": "Ganza", "gzi": "Gazi", "gzn": "Gane", "ha": "Hausa", "haa": "Han", "hab": "Hanoi Sign Language", "hac": "Gurani", "had": "Hatam", "hae": "Eastern Oromo", "haf": "Haiphong Sign Language", "hag": "Hanga", "hah": "Hahon", "hai": "Haida", "haj": "Hajong", "hak": "Hakka Chinese", "hal": "Halang", "ham": "Hewa", "han": "Hangaza", "hao": "Hakö", "hap": "Hupla", "haq": "Ha", "har": "Harari", "has": "Haisla", "hav": "Havu", "haw": "Hawaiian", "hax": "Southern Haida", "hay": "Haya", "haz": "Hazaragi", "hba": "Hamba", "hbb": "Huba", "hbn": "Heiban", "hbo": "Ancient Hebrew", "hbu": "Habu", "hca": "Andaman Creole Hindi", "hch": "Huichol", "hdn": "Northern Haida", "hds": "Honduras Sign Language", "hdy": "Hadiyya", "he": "Hebrew", "hea": "Northern Qiandong Miao", "hed": "Herdé", "heg": "Helong", "heh": "Hehe", "hei": "Heiltsuk", "hem": "Hemba", "hgm": "Haiǁom", "hgw": "Haigwai", "hhi": "Hoia Hoia", "hhr": "Kerak", "hhy": "Hoyahoya", "hi": "Hindi", "hia": 
"Lamang", "hib": "Hibito", "hid": "Hidatsa", "hif": "Fiji Hindi", "hig": "Kamwe", "hih": "Pamosu", "hii": "Hinduri", "hij": "Hijuk", "hik": "Seit-Kaitetu", "hil": "Hiligaynon", "him": "Himachali languages; Western Pahari languages", "hio": "Tsoa", "hir": "Himarimã", "hit": "Hittite", "hiw": "Hiw", "hix": "Hixkaryána", "hji": "Haji", "hka": "Kahe", "hke": "Hunde", "hkh": "Khah; Poguli", "hkk": "Hunjara-Kaina Ke", "hkn": "Mel-Khaonh", "hks": "Hong Kong Sign Language; Heung Kong Sau Yue", "hla": "Halia", "hlb": "Halbi", "hld": "Halang Doan", "hle": "Hlersu", "hlt": "Matu Chin", "hlu": "Hieroglyphic Luwian", "hma": "Southern Mashan Hmong; Southern Mashan Miao", "hmb": "Humburi Senni Songhay", "hmc": "Central Huishui Hmong; Central Huishui Miao", "hmd": "Large Flowery Miao; A-hmaos; Da-Hua Miao", "hme": "Eastern Huishui Hmong; Eastern Huishui Miao", "hmf": "Hmong Don", "hmg": "Southwestern Guiyang Hmong", "hmh": "Southwestern Huishui Hmong; Southwestern Huishui Miao", "hmi": "Northern Huishui Hmong; Northern Huishui Miao", "hmj": "Ge; Gejia", "hmk": "Maek", "hml": "Luopohe Hmong; Luopohe Miao", "hmm": "Central Mashan Hmong; Central Mashan Miao", "hmn": "Hmong; Mong", "hmp": "Northern Mashan Hmong; Northern Mashan Miao", "hmq": "Eastern Qiandong Miao", "hmr": "Hmar", "hms": "Southern Qiandong Miao", "hmt": "Hamtai", "hmu": "Hamap", "hmv": "Hmong Dô", "hmw": "Western Mashan Hmong; Western Mashan Miao", "hmx": "Hmong-Mien languages", "hmy": "Southern Guiyang Hmong; Southern Guiyang Miao", "hmz": "Hmong Shua; Sinicized Miao", "hna": "Mina (Cameroon)", "hnd": "Southern Hindko", "hne": "Chhattisgarhi", "hng": "Hungu", "hnh": "ǁAni", "hni": "Hani", "hnj": "Hmong Njua; Mong Leng; Mong Njua", "hnn": "Hanunoo", "hno": "Northern Hindko", "hns": "Caribbean Hindustani", "hnu": "Hung", "ho": "Hiri Motu", "hoa": "Hoava", "hob": "Mari (Madang Province)", "hoc": "Ho", "hod": "Holma", "hoe": "Horom", "hoh": "Hobyót", "hoi": "Holikachuk", "hoj": "Hadothi; Haroti", "hok": "Hokan languages", "hol": "Holu", "hom": "Homa", "hoo": "Holoholo", "hop": "Hopi", "hor": "Horo", "hos": "Ho Chi Minh City Sign Language", "hot": "Hote; Malê", "hov": "Hovongan", "how": "Honi", "hoy": "Holiya", "hoz": "Hozo", "hpo": "Hpon", "hps": "Hawai'i Sign Language (HSL); Hawai'i Pidgin Sign Language", "hr": "Croatian", "hra": "Hrangkhol", "hrc": "Niwer Mil", "hre": "Hre", "hrk": "Haruku", "hrm": "Horned Miao", "hro": "Haroi", "hrp": "Nhirrpi", "hrt": "Hértevin", "hru": "Hruso", "hrw": "Warwar Feni", "hrx": "Hunsrik", "hrz": "Harzani", "hsb": "Upper Sorbian", "hsh": "Hungarian Sign Language", "hsl": "Hausa Sign Language", "hsn": "Xiang Chinese", "hss": "Harsusi", "ht": "Haitian; Haitian Creole", "hti": "Hoti", "hto": "Minica Huitoto", "hts": "Hadza", "htu": "Hitu", "htx": "Middle Hittite", "hu": "Hungarian", "hub": "Huambisa", "huc": "ǂHua; ǂʼAmkhoe", "hud": "Huaulu", "hue": "San Francisco Del Mar Huave", "huf": "Humene", "hug": "Huachipaeri", "huh": "Huilliche", "hui": "Huli", "huj": "Northern Guiyang Hmong; Northern Guiyang Miao", "huk": "Hulung", "hul": "Hula", "hum": "Hungana", "huo": "Hu", "hup": "Hupa", "huq": "Tsat", "hur": "Halkomelem", "hus": "Huastec", "hut": "Humla", "huu": "Murui Huitoto", "huv": "San Mateo Del Mar Huave", "huw": "Hukumina", "hux": "Nüpode Huitoto", "huy": "Hulaulá", "huz": "Hunzib", "hvc": "Haitian Vodoun Culture Language", "hve": "San Dionisio Del Mar Huave", "hvk": "Haveke", "hvn": "Sabu", "hvv": "Santa María Del Mar Huave", "hwa": "Wané", "hwc": "Hawai'i Creole English; Hawai'i Pidgin", "hwo": "Hwana", "hy": 
"Armenian", "hya": "Hya", "hyw": "Western Armenian", "hyx": "Armenian (family)", "hz": "Herero", "ia": "Interlingua (International Auxiliary Language Association)", "iai": "Iaai", "ian": "Iatmul", "iar": "Purari", "iba": "Iban", "ibb": "Ibibio", "ibd": "Iwaidja", "ibe": "Akpes", "ibg": "Ibanag", "ibh": "Bih", "ibl": "Ibaloi", "ibm": "Agoi", "ibn": "Ibino", "ibr": "Ibuoro", "ibu": "Ibu", "iby": "Ibani", "ica": "Ede Ica", "ich": "Etkywan", "icl": "Icelandic Sign Language", "icr": "Islander Creole English", "id": "Indonesian", "ida": "Idakho-Isukha-Tiriki; Luidakho-Luisukha-Lutirichi", "idb": "Indo-Portuguese", "idc": "Idon; Ajiya", "idd": "Ede Idaca", "ide": "Idere", "idi": "Idi", "idr": "Indri", "ids": "Idesa", "idt": "Idaté", "idu": "Idoma", "ie": "Interlingue; Occidental", "ifa": "Amganad Ifugao", "ifb": "Batad Ifugao; Ayangan Ifugao", "ife": "Ifè", "iff": "Ifo", "ifk": "Tuwali Ifugao", "ifm": "Teke-Fuumu", "ifu": "Mayoyao Ifugao", "ify": "Keley-I Kallahan", "ig": "Igbo", "igb": "Ebira", "ige": "Igede", "igg": "Igana", "igl": "Igala", "igm": "Kanggape", "ign": "Ignaciano", "igo": "Isebe", "igs": "Interglossa", "igw": "Igwe", "ihb": "Iha Based Pidgin", "ihi": "Ihievbe", "ihp": "Iha", "ihw": "Bidhawal", "ii": "Sichuan Yi; Nuosu", "iin": "Thiin", "iir": "Indo-Iranian languages", "ijc": "Izon", "ije": "Biseni", "ijj": "Ede Ije", "ijn": "Kalabari", "ijo": "Ijo languages", "ijs": "Southeast Ijo", "ik": "Inupiaq", "ike": "Eastern Canadian Inuktitut", "iki": "Iko", "ikk": "Ika", "ikl": "Ikulu", "iko": "Olulumo-Ikom", "ikp": "Ikpeshi", "ikr": "Ikaranggal", "iks": "Inuit Sign Language", "ikt": "Inuinnaqtun; Western Canadian Inuktitut", "ikv": "Iku-Gora-Ankwa", "ikw": "Ikwere", "ikx": "Ik", "ikz": "Ikizu", "ila": "Ile Ape", "ilb": "Ila", "ilg": "Garig-Ilgar", "ili": "Ili Turki", "ilk": "Ilongot", "ilm": "Iranun (Malaysia)", "ilo": "Iloko", "ilp": "Iranun (Philippines)", "ils": "International Sign", "ilu": "Ili'uun", "ilv": "Ilue", "ima": "Mala Malasar", "imi": "Anamgura", "iml": "Miluk", "imn": "Imonda", "imo": "Imbongu", "imr": "Imroing", "ims": "Marsian", "imt": "Imotong", "imy": "Milyan", "inb": "Inga", "inc": "Indic languages", "ine": "Indo-European languages", "ing": "Degexit'an", "inh": "Ingush", "inj": "Jungle Inga", "inl": "Indonesian Sign Language", "inm": "Minaean", "inn": "Isinai", "ino": "Inoke-Yate", "inp": "Iñapari", "ins": "Indian Sign Language", "int": "Intha", "inz": "Ineseño", "io": "Ido", "ior": "Inor", "iou": "Tuma-Irumu", "iow": "Iowa-Oto", "ipi": "Ipili", "ipo": "Ipiko", "iqu": "Iquito", "iqw": "Ikwo", "ira": "Iranian languages", "ire": "Iresim", "irh": "Irarutu", "iri": "Rigwe; Irigwe", "irk": "Iraqw", "irn": "Irántxe", "iro": "Iroquoian languages", "irr": "Ir", "iru": "Irula", "irx": "Kamberau", "iry": "Iraya", "is": "Icelandic", "isa": "Isabi", "isc": "Isconahua", "isd": "Isnag", "ise": "Italian Sign Language", "isg": "Irish Sign Language", "ish": "Esan", "isi": "Nkem-Nkum", "isk": "Ishkashimi", "ism": "Masimasi", "isn": "Isanzu", "iso": "Isoko", "isr": "Israeli Sign Language", "ist": "Istriot", "isu": "Isu (Menchum Division)", "it": "Italian", "itb": "Binongan Itneg", "itc": "Italic languages", "itd": "Southern Tidung", "ite": "Itene", "iti": "Inlaod Itneg", "itk": "Judeo-Italian", "itl": "Itelmen", "itm": "Itu Mbon Uzo", "ito": "Itonama", "itr": "Iteri", "its": "Isekiri", "itt": "Maeng Itneg", "itv": "Itawit", "itw": "Ito", "itx": "Itik", "ity": "Moyadan Itneg", "itz": "Itzá", "iu": "Inuktitut", "ium": "Iu Mien", "ivb": "Ibatan", "ivv": "Ivatan", "iwk": "I-Wak", "iwm": 
"Iwam", "iwo": "Iwur", "iws": "Sepik Iwam", "ixc": "Ixcatec", "ixl": "Ixil", "iya": "Iyayu", "iyo": "Mesaka", "iyx": "Yaka (Congo)", "izh": "Ingrian", "izr": "Izere", "izz": "Izii", "ja": "Japanese", "jaa": "Jamamadí", "jab": "Hyam", "jac": "Popti'; Jakalteko", "jad": "Jahanka", "jae": "Yabem", "jaf": "Jara", "jah": "Jah Hut", "jaj": "Zazao", "jak": "Jakun", "jal": "Yalahatan", "jam": "Jamaican Creole English", "jan": "Jandai", "jao": "Yanyuwa", "jaq": "Yaqay", "jas": "New Caledonian Javanese", "jat": "Jakati", "jau": "Yaur", "jax": "Jambi Malay", "jay": "Yan-nhangu; Nhangu", "jaz": "Jawe", "jbe": "Judeo-Berber", "jbi": "Badjiri", "jbj": "Arandai", "jbk": "Barikewa", "jbm": "Bijim", "jbn": "Nafusi", "jbo": "Lojban", "jbr": "Jofotek-Bromnya", "jbt": "Jabutí", "jbu": "Jukun Takum", "jbw": "Yawijibaya", "jcs": "Jamaican Country Sign Language", "jct": "Krymchak", "jda": "Jad", "jdg": "Jadgali", "jdt": "Judeo-Tat", "jeb": "Jebero", "jee": "Jerung", "jeh": "Jeh", "jei": "Yei", "jek": "Jeri Kuo", "jel": "Yelmek", "jen": "Dza", "jer": "Jere", "jet": "Manem", "jeu": "Jonkor Bourmataguil", "jgb": "Ngbee", "jge": "Judeo-Georgian", "jgk": "Gwak", "jgo": "Ngomba", "jhi": "Jehai", "jhs": "Jhankot Sign Language", "jia": "Jina", "jib": "Jibu", "jic": "Tol", "jid": "Bu (Kaduna State)", "jie": "Jilbe", "jig": "Jingulu; Djingili", "jih": "sTodsde; Shangzhai", "jii": "Jiiddu", "jil": "Jilim", "jim": "Jimi (Cameroon)", "jio": "Jiamao", "jiq": "Guanyinqiao; Lavrung", "jit": "Jita", "jiu": "Youle Jinuo", "jiv": "Shuar", "jiy": "Buyuan Jinuo", "jje": "Jejueo", "jjr": "Bankal", "jka": "Kaera", "jkm": "Mobwa Karen", "jko": "Kubo", "jkp": "Paku Karen", "jkr": "Koro (India)", "jks": "Amami Koniya Sign Language", "jku": "Labir", "jle": "Ngile", "jls": "Jamaican Sign Language", "jma": "Dima", "jmb": "Zumbun", "jmc": "Machame", "jmd": "Yamdena", "jmi": "Jimi (Nigeria)", "jml": "Jumli", "jmn": "Makuri Naga", "jmr": "Kamara", "jms": "Mashi (Nigeria)", "jmw": "Mouwase", "jmx": "Western Juxtlahuaca Mixtec", "jna": "Jangshung", "jnd": "Jandavra", "jng": "Yangman", "jni": "Janji", "jnj": "Yemsa", "jnl": "Rawat", "jns": "Jaunsari", "job": "Joba", "jod": "Wojenaka", "jog": "Jogi", "jor": "Jorá", "jos": "Jordanian Sign Language", "jow": "Jowulu", "jpa": "Jewish Palestinian Aramaic", "jpr": "Judeo-Persian", "jpx": "Japanese (family)", "jqr": "Jaqaru", "jra": "Jarai", "jrb": "Judeo-Arabic", "jrr": "Jiru", "jrt": "Jakattoe", "jru": "Japrería", "jsl": "Japanese Sign Language", "jua": "Júma", "jub": "Wannu", "juc": "Jurchen", "jud": "Worodougou", "juh": "Hõne", "jui": "Ngadjuri", "juk": "Wapan", "jul": "Jirel", "jum": "Jumjum", "jun": "Juang", "juo": "Jiba", "jup": "Hupdë", "jur": "Jurúna", "jus": "Jumla Sign Language", "jut": "Jutish", "juu": "Ju", "juw": "Wãpha", "juy": "Juray", "jv": "Javanese", "jvd": "Javindo", "jvn": "Caribbean Javanese", "jwi": "Jwira-Pepesa", "jya": "Jiarong", "jye": "Judeo-Yemeni Arabic", "jyy": "Jaya", "ka": "Georgian", "kaa": "Kara-Kalpak; Karakalpak", "kab": "Kabyle", "kac": "Kachin; Jingpho", "kad": "Adara", "kae": "Ketangalan", "kaf": "Katso", "kag": "Kajaman", "kah": "Kara (Central African Republic)", "kai": "Karekare", "kaj": "Jju", "kak": "Kalanguya; Kayapa Kallahan", "kam": "Kamba (Kenya)", "kao": "Xaasongaxango", "kap": "Bezhta", "kaq": "Capanahua", "kar": "Karen languages", "kav": "Katukína", "kaw": "Kawi", "kax": "Kao", "kay": "Kamayurá", "kba": "Kalarko", "kbb": "Kaxuiâna", "kbc": "Kadiwéu", "kbd": "Kabardian", "kbe": "Kanju", "kbg": "Khamba", "kbh": "Camsá", "kbi": "Kaptiau", "kbj": "Kari", 
"kbk": "Grass Koiari", "kbl": "Kanembu", "kbm": "Iwal", "kbn": "Kare (Central African Republic)", "kbo": "Keliko", "kbp": "Kabiyè", "kbq": "Kamano", "kbr": "Kafa", "kbs": "Kande", "kbt": "Abadi", "kbu": "Kabutra", "kbv": "Dera (Indonesia)", "kbw": "Kaiep", "kbx": "Ap Ma", "kby": "Manga Kanuri", "kbz": "Duhwa", "kca": "Khanty", "kcb": "Kawacha", "kcc": "Lubila", "kcd": "Ngkâlmpw Kanum", "kce": "Kaivi", "kcf": "Ukaan", "kcg": "Tyap", "kch": "Vono", "kci": "Kamantan", "kcj": "Kobiana", "kck": "Kalanga", "kcl": "Kela (Papua New Guinea); Kala", "kcm": "Gula (Central African Republic)", "kcn": "Nubi", "kco": "Kinalakna", "kcp": "Kanga", "kcq": "Kamo", "kcr": "Katla", "kcs": "Koenoem", "kct": "Kaian", "kcu": "Kami (Tanzania)", "kcv": "Kete", "kcw": "Kabwari", "kcx": "Kachama-Ganjule", "kcy": "Korandje", "kcz": "Konongo", "kda": "Worimi", "kdc": "Kutu", "kdd": "Yankunytjatjara", "kde": "Makonde", "kdf": "Mamusi", "kdg": "Seba", "kdh": "Tem", "kdi": "Kumam", "kdj": "Karamojong", "kdk": "Numèè; Kwényi", "kdl": "Tsikimba", "kdm": "Kagoma", "kdn": "Kunda", "kdo": "Kordofanian languages", "kdp": "Kaningdon-Nindem", "kdq": "Koch", "kdr": "Karaim", "kdt": "Kuy", "kdu": "Kadaru", "kdw": "Koneraw", "kdx": "Kam", "kdy": "Keder; Keijar", "kdz": "Kwaja", "kea": "Kabuverdianu", "keb": "Kélé", "kec": "Keiga", "ked": "Kerewe", "kee": "Eastern Keres", "kef": "Kpessi", "keg": "Tese", "keh": "Keak", "kei": "Kei", "kej": "Kadar", "kek": "Kekchí", "kel": "Kela (Democratic Republic of Congo)", "kem": "Kemak", "ken": "Kenyang", "keo": "Kakwa", "kep": "Kaikadi", "keq": "Kamar", "ker": "Kera", "kes": "Kugbo", "ket": "Ket", "keu": "Akebu", "kev": "Kanikkaran", "kew": "West Kewa", "kex": "Kukna", "key": "Kupia", "kez": "Kukele", "kfa": "Kodava", "kfb": "Northwestern Kolami", "kfc": "Konda-Dora", "kfd": "Korra Koraga", "kfe": "Kota (India)", "kff": "Koya", "kfg": "Kudiya", "kfh": "Kurichiya", "kfi": "Kannada Kurumba", "kfj": "Kemiehua", "kfk": "Kinnauri", "kfl": "Kung", "kfm": "Khunsari", "kfn": "Kuk", "kfo": "Koro (Côte d'Ivoire)", "kfp": "Korwa", "kfq": "Korku", "kfr": "Kachhi; Kutchi", "kfs": "Bilaspuri", "kft": "Kanjari", "kfu": "Katkari", "kfv": "Kurmukar", "kfw": "Kharam Naga", "kfx": "Kullu Pahari", "kfy": "Kumaoni", "kfz": "Koromfé", "kg": "Kongo", "kga": "Koyaga", "kgb": "Kawe", "kge": "Komering", "kgf": "Kube", "kgg": "Kusunda", "kgi": "Selangor Sign Language", "kgj": "Gamale Kham", "kgk": "Kaiwá", "kgl": "Kunggari", "kgm": "Karipúna", "kgn": "Karingani", "kgo": "Krongo", "kgp": "Kaingang", "kgq": "Kamoro", "kgr": "Abun", "kgs": "Kumbainggar", "kgt": "Somyev", "kgu": "Kobol", "kgv": "Karas", "kgw": "Karon Dori", "kgx": "Kamaru", "kgy": "Kyerung", "kha": "Khasi", "khb": "Lü", "khc": "Tukang Besi North", "khd": "Bädi Kanum", "khe": "Korowai", "khf": "Khuen", "khg": "Khams Tibetan", "khh": "Kehu", "khi": "Khoisan languages", "khj": "Kuturmi", "khk": "Halh Mongolian", "khl": "Lusi", "khn": "Khandesi", "kho": "Khotanese; Sakan", "khp": "Kapori; Kapauri", "khq": "Koyra Chiini Songhay", "khr": "Kharia", "khs": "Kasua", "kht": "Khamti", "khu": "Nkhumbi", "khv": "Khvarshi", "khw": "Khowar", "khx": "Kanu", "khy": "Kele (Democratic Republic of Congo)", "khz": "Keapara", "ki": "Kikuyu; Gikuyu", "kia": "Kim", "kib": "Koalib", "kic": "Kickapoo", "kid": "Koshin", "kie": "Kibet", "kif": "Eastern Parbate Kham", "kig": "Kimaama; Kimaghima", "kih": "Kilmeri", "kii": "Kitsai", "kij": "Kilivila", "kil": "Kariya", "kim": "Karagas", "kio": "Kiowa", "kip": "Sheshi Kham", "kiq": "Kosadle; Kosare", "kis": "Kis", "kit": "Agob", "kiu": 
"Kirmanjki (individual language)", "kiv": "Kimbu", "kiw": "Northeast Kiwai", "kix": "Khiamniungan Naga", "kiy": "Kirikiri", "kiz": "Kisi", "kj": "Kuanyama; Kwanyama", "kja": "Mlap", "kjb": "Q'anjob'al; Kanjobal", "kjc": "Coastal Konjo", "kjd": "Southern Kiwai", "kje": "Kisar", "kjg": "Khmu", "kjh": "Khakas", "kji": "Zabana", "kjj": "Khinalugh", "kjk": "Highland Konjo", "kjl": "Western Parbate Kham", "kjm": "Kháng", "kjn": "Kunjen", "kjo": "Harijan Kinnauri", "kjp": "Pwo Eastern Karen", "kjq": "Western Keres", "kjr": "Kurudu", "kjs": "East Kewa", "kjt": "Phrae Pwo Karen", "kju": "Kashaya", "kjv": "Kaikavian Literary Language", "kjx": "Ramopa", "kjy": "Erave", "kjz": "Bumthangkha", "kk": "Kazakh", "kka": "Kakanda", "kkb": "Kwerisa", "kkc": "Odoodee", "kkd": "Kinuku", "kke": "Kakabe", "kkf": "Kalaktang Monpa", "kkg": "Mabaka Valley Kalinga", "kkh": "Khün", "kki": "Kagulu", "kkj": "Kako", "kkk": "Kokota", "kkl": "Kosarek Yale", "kkm": "Kiong", "kkn": "Kon Keu", "kko": "Karko", "kkp": "Gugubera; Koko-Bera", "kkq": "Kaeku", "kkr": "Kir-Balar", "kks": "Giiwo", "kkt": "Koi", "kku": "Tumi", "kkv": "Kangean", "kkw": "Teke-Kukuya", "kkx": "Kohin", "kky": "Guugu Yimidhirr; Guguyimidjir", "kkz": "Kaska", "kl": "Kalaallisut; Greenlandic", "kla": "Klamath-Modoc", "klb": "Kiliwa", "klc": "Kolbila", "kld": "Gamilaraay", "kle": "Kulung (Nepal)", "klf": "Kendeje", "klg": "Tagakaulo", "klh": "Weliki", "kli": "Kalumpang", "klj": "Khalaj", "klk": "Kono (Nigeria)", "kll": "Kagan Kalagan", "klm": "Migum", "kln": "Kalenjin", "klo": "Kapya", "klp": "Kamasa", "klq": "Rumu", "klr": "Khaling", "kls": "Kalasha", "klt": "Nukna", "klu": "Klao", "klv": "Maskelynes", "klw": "Tado; Lindu", "klx": "Koluwawa", "kly": "Kalao", "klz": "Kabola", "km": "Khmer; Central Khmer", "kma": "Konni", "kmb": "Kimbundu", "kmc": "Southern Dong", "kmd": "Majukayang Kalinga", "kme": "Bakole", "kmf": "Kare (Papua New Guinea)", "kmg": "Kâte", "kmh": "Kalam", "kmi": "Kami (Nigeria)", "kmj": "Kumarbhag Paharia", "kmk": "Limos Kalinga", "kml": "Tanudan Kalinga", "kmm": "Kom (India)", "kmn": "Awtuw", "kmo": "Kwoma", "kmp": "Gimme", "kmq": "Kwama", "kmr": "Northern Kurdish", "kms": "Kamasau", "kmt": "Kemtuik", "kmu": "Kanite", "kmv": "Karipúna Creole French", "kmw": "Komo (Democratic Republic of Congo)", "kmx": "Waboda", "kmy": "Koma", "kmz": "Khorasani Turkish", "kn": "Kannada", "kna": "Dera (Nigeria)", "knb": "Lubuagan Kalinga", "knc": "Central Kanuri", "knd": "Konda", "kne": "Kankanaey", "knf": "Mankanya", "kng": "Koongo", "kni": "Kanufi", "knj": "Western Kanjobal", "knk": "Kuranko", "knl": "Keninjal", "knm": "Kanamarí", "knn": "Konkani (individual language)", "kno": "Kono (Sierra Leone)", "knp": "Kwanja", "knq": "Kintaq", "knr": "Kaningra", "kns": "Kensiu", "knt": "Panoan Katukína", "knu": "Kono (Guinea)", "knv": "Tabo", "knw": "Kung-Ekoka", "knx": "Kendayan; Salako", "kny": "Kanyok", "knz": "Kalamsé", "ko": "Korean", "koa": "Konomala", "koc": "Kpati", "kod": "Kodi", "koe": "Kacipo-Bale Suri", "kof": "Kubi", "kog": "Cogui; Kogi", "koh": "Koyo", "koi": "Komi-Permyak", "kok": "Konkani (macrolanguage)", "kol": "Kol (Papua New Guinea)", "koo": "Konzo", "kop": "Waube", "koq": "Kota (Gabon)", "kos": "Kosraean", "kot": "Lagwan", "kou": "Koke", "kov": "Kudu-Camo", "kow": "Kugama", "koy": "Koyukon", "koz": "Korak", "kpa": "Kutto", "kpb": "Mullu Kurumba", "kpc": "Curripaco", "kpd": "Koba", "kpe": "Kpelle", "kpf": "Komba", "kpg": "Kapingamarangi", "kph": "Kplang", "kpi": "Kofei", "kpj": "Karajá", "kpk": "Kpan", "kpl": "Kpala", "kpm": "Koho", "kpn": 
"Kepkiriwát", "kpo": "Ikposo", "kpq": "Korupun-Sela", "kpr": "Korafe-Yegha", "kps": "Tehit", "kpt": "Karata", "kpu": "Kafoa", "kpv": "Komi-Zyrian", "kpw": "Kobon", "kpx": "Mountain Koiali", "kpy": "Koryak", "kpz": "Kupsabiny", "kqa": "Mum", "kqb": "Kovai", "kqc": "Doromu-Koki", "kqd": "Koy Sanjaq Surat", "kqe": "Kalagan", "kqf": "Kakabai", "kqg": "Khe", "kqh": "Kisankasa", "kqi": "Koitabu", "kqj": "Koromira", "kqk": "Kotafon Gbe", "kql": "Kyenele", "kqm": "Khisa", "kqn": "Kaonde", "kqo": "Eastern Krahn", "kqp": "Kimré", "kqq": "Krenak", "kqr": "Kimaragang", "kqs": "Northern Kissi", "kqt": "Klias River Kadazan", "kqu": "Seroa", "kqv": "Okolod", "kqw": "Kandas", "kqx": "Mser", "kqy": "Koorete", "kqz": "Korana", "kr": "Kanuri", "kra": "Kumhali", "krb": "Karkin", "krc": "Karachay-Balkar", "krd": "Kairui-Midiki", "kre": "Panará", "krf": "Koro (Vanuatu)", "krh": "Kurama", "kri": "Krio", "krj": "Kinaray-A", "krk": "Kerek", "krl": "Karelian", "krn": "Sapo", "kro": "Kru languages", "krp": "Korop", "krr": "Krung", "krs": "Gbaya (Sudan)", "krt": "Tumari Kanuri", "kru": "Kurukh", "krv": "Kavet", "krw": "Western Krahn", "krx": "Karon", "kry": "Kryts", "krz": "Sota Kanum", "ks": "Kashmiri", "ksa": "Shuwa-Zamani", "ksb": "Shambala", "ksc": "Southern Kalinga", "ksd": "Kuanua", "kse": "Kuni", "ksf": "Bafia", "ksg": "Kusaghe", "ksh": "Kölsch", "ksi": "Krisa; I'saka", "ksj": "Uare", "ksk": "Kansa", "ksl": "Kumalu", "ksm": "Kumba", "ksn": "Kasiguranin", "kso": "Kofa", "ksp": "Kaba", "ksq": "Kwaami", "ksr": "Borong", "kss": "Southern Kisi", "kst": "Winyé", "ksu": "Khamyang", "ksv": "Kusu", "ksw": "S'gaw Karen", "ksx": "Kedang", "ksy": "Kharia Thar", "ksz": "Kodaku", "kta": "Katua", "ktb": "Kambaata", "ktc": "Kholok", "ktd": "Kokata; Kukatha", "kte": "Nubri", "ktf": "Kwami", "ktg": "Kalkutung", "kth": "Karanga", "kti": "North Muyu", "ktj": "Plapo Krumen", "ktk": "Kaniet", "ktl": "Koroshi", "ktm": "Kurti", "ktn": "Karitiâna", "kto": "Kuot", "ktp": "Kaduo", "ktq": "Katabaga", "kts": "South Muyu", "ktt": "Ketum", "ktu": "Kituba (Democratic Republic of Congo)", "ktv": "Eastern Katu", "ktw": "Kato", "ktx": "Kaxararí", "kty": "Kango (Bas-Uélé District)", "ktz": "Juǀʼhoan; Juǀʼhoansi", "ku": "Kurdish", "kub": "Kutep", "kuc": "Kwinsu", "kud": "'Auhelawa", "kue": "Kuman (Papua New Guinea)", "kuf": "Western Katu", "kug": "Kupa", "kuh": "Kushi", "kui": "Kuikúro-Kalapálo; Kalapalo", "kuj": "Kuria", "kuk": "Kepo'", "kul": "Kulere", "kum": "Kumyk", "kun": "Kunama", "kuo": "Kumukio", "kup": "Kunimaipa", "kuq": "Karipuna", "kus": "Kusaal", "kut": "Kutenai", "kuu": "Upper Kuskokwim", "kuv": "Kur", "kuw": "Kpagua", "kux": "Kukatja", "kuy": "Kuuku-Ya'u", "kuz": "Kunza", "kv": "Komi", "kva": "Bagvalal", "kvb": "Kubu", "kvc": "Kove", "kvd": "Kui (Indonesia)", "kve": "Kalabakan", "kvf": "Kabalai", "kvg": "Kuni-Boazi", "kvh": "Komodo", "kvi": "Kwang", "kvj": "Psikye", "kvk": "Korean Sign Language", "kvl": "Kayaw", "kvm": "Kendem", "kvn": "Border Kuna", "kvo": "Dobel", "kvp": "Kompane", "kvq": "Geba Karen", "kvr": "Kerinci", "kvt": "Lahta Karen; Lahta", "kvu": "Yinbaw Karen", "kvv": "Kola", "kvw": "Wersing", "kvx": "Parkari Koli", "kvy": "Yintale Karen; Yintale", "kvz": "Tsakwambo; Tsaukambo", "kw": "Cornish", "kwa": "Dâw", "kwb": "Kwa", "kwc": "Likwala", "kwd": "Kwaio", "kwe": "Kwerba", "kwf": "Kwara'ae", "kwg": "Sara Kaba Deme", "kwh": "Kowiai", "kwi": "Awa-Cuaiquer", "kwj": "Kwanga", "kwk": "Kwakiutl", "kwl": "Kofyar", "kwm": "Kwambi", "kwn": "Kwangali", "kwo": "Kwomtari", "kwp": "Kodia", "kwr": "Kwer", "kws": "Kwese", "kwt": 
"Kwesten", "kwu": "Kwakum", "kwv": "Sara Kaba Náà", "kww": "Kwinti", "kwx": "Khirwar", "kwy": "San Salvador Kongo", "kwz": "Kwadi", "kxa": "Kairiru", "kxb": "Krobu", "kxc": "Konso; Khonso", "kxd": "Brunei", "kxf": "Manumanaw Karen; Manumanaw", "kxh": "Karo (Ethiopia)", "kxi": "Keningau Murut", "kxj": "Kulfa", "kxk": "Zayein Karen", "kxm": "Northern Khmer", "kxn": "Kanowit-Tanjong Melanau", "kxo": "Kanoé", "kxp": "Wadiyara Koli", "kxq": "Smärky Kanum", "kxr": "Koro (Papua New Guinea)", "kxs": "Kangjia", "kxt": "Koiwat", "kxv": "Kuvi", "kxw": "Konai", "kxx": "Likuba", "kxy": "Kayong", "kxz": "Kerewo", "ky": "Kirghiz; Kyrgyz", "kya": "Kwaya", "kyb": "Butbut Kalinga", "kyc": "Kyaka", "kyd": "Karey", "kye": "Krache", "kyf": "Kouya", "kyg": "Keyagana", "kyh": "Karok", "kyi": "Kiput", "kyj": "Karao", "kyk": "Kamayo", "kyl": "Kalapuya", "kym": "Kpatili", "kyn": "Northern Binukidnon", "kyo": "Kelon", "kyp": "Kang", "kyq": "Kenga", "kyr": "Kuruáya", "kys": "Baram Kayan", "kyt": "Kayagar", "kyu": "Western Kayah", "kyv": "Kayort", "kyw": "Kudmali", "kyx": "Rapoisi", "kyy": "Kambaira", "kyz": "Kayabí", "kza": "Western Karaboro", "kzb": "Kaibobo", "kzc": "Bondoukou Kulango", "kzd": "Kadai", "kze": "Kosena", "kzf": "Da'a Kaili", "kzg": "Kikai", "kzi": "Kelabit", "kzk": "Kazukuru", "kzl": "Kayeli", "kzm": "Kais", "kzn": "Kokola", "kzo": "Kaningi", "kzp": "Kaidipang", "kzq": "Kaike", "kzr": "Karang", "kzs": "Sugut Dusun", "kzu": "Kayupulau", "kzv": "Komyandaret", "kzw": "Karirí-Xocó", "kzx": "Kamarian", "kzy": "Kango (Tshopo District)", "kzz": "Kalabra", "la": "Latin", "laa": "Southern Subanen", "lab": "Linear A", "lac": "Lacandon", "lad": "Ladino", "lae": "Pattani", "laf": "Lafofa", "lag": "Langi", "lah": "Lahnda", "lai": "Lambya", "laj": "Lango (Uganda)", "lal": "Lalia", "lam": "Lamba", "lan": "Laru", "lap": "Laka (Chad)", "laq": "Qabiao", "lar": "Larteh", "las": "Lama (Togo)", "lau": "Laba", "law": "Lauje", "lax": "Tiwa", "lay": "Lama Bai", "laz": "Aribwatsa", "lb": "Luxembourgish; Letzeburgesch", "lbb": "Label", "lbc": "Lakkia", "lbe": "Lak", "lbf": "Tinani", "lbg": "Laopang", "lbi": "La'bi", "lbj": "Ladakhi", "lbk": "Central Bontok", "lbl": "Libon Bikol", "lbm": "Lodhi", "lbn": "Rmeet", "lbo": "Laven", "lbq": "Wampar", "lbr": "Lohorung", "lbs": "Libyan Sign Language", "lbt": "Lachi", "lbu": "Labu", "lbv": "Lavatbura-Lamusong", "lbw": "Tolaki", "lbx": "Lawangan", "lby": "Lamalama; Lamu-Lamu", "lbz": "Lardil", "lcc": "Legenyem", "lcd": "Lola", "lce": "Loncong; Sekak", "lcf": "Lubu", "lch": "Luchazi", "lcl": "Lisela", "lcm": "Tungag", "lcp": "Western Lawa", "lcq": "Luhu", "lcs": "Lisabata-Nuniali", "lda": "Kla-Dan", "ldb": "Dũya", "ldd": "Luri", "ldg": "Lenyima", "ldh": "Lamja-Dengsa-Tola", "ldi": "Laari", "ldj": "Lemoro", "ldk": "Leelau", "ldl": "Kaan", "ldm": "Landoma", "ldn": "Láadan", "ldo": "Loo", "ldp": "Tso", "ldq": "Lufu", "lea": "Lega-Shabunda", "leb": "Lala-Bisa", "lec": "Leco", "led": "Lendu", "lee": "Lyélé", "lef": "Lelemi", "leh": "Lenje", "lei": "Lemio", "lej": "Lengola", "lek": "Leipon", "lel": "Lele (Democratic Republic of Congo)", "lem": "Nomaande", "len": "Lenca", "leo": "Leti (Cameroon)", "lep": "Lepcha", "leq": "Lembena", "ler": "Lenkau", "les": "Lese", "let": "Lesing-Gelimi; Amio-Gelimi", "leu": "Kara (Papua New Guinea)", "lev": "Lamma", "lew": "Ledo Kaili", "lex": "Luang", "ley": "Lemolang", "lez": "Lezghian", "lfa": "Lefa", "lfn": "Lingua Franca Nova", "lg": "Ganda; Luganda", "lga": "Lungga", "lgb": "Laghu", "lgg": "Lugbara", "lgh": "Laghuu", "lgi": "Lengilu", "lgk": "Lingarak; 
Neverver", "lgl": "Wala", "lgm": "Lega-Mwenga", "lgn": "T'apo; Opuuo", "lgo": "Lango (South Sudan)", "lgq": "Logba", "lgr": "Lengo", "lgt": "Pahi", "lgu": "Longgu", "lgz": "Ligenza", "lha": "Laha (Viet Nam)", "lhh": "Laha (Indonesia)", "lhi": "Lahu Shi", "lhl": "Lahul Lohar", "lhm": "Lhomi", "lhn": "Lahanan", "lhp": "Lhokpu", "lhs": "Mlahsö", "lht": "Lo-Toga", "lhu": "Lahu", "li": "Limburgan; Limburger; Limburgish", "lia": "West-Central Limba", "lib": "Likum", "lic": "Hlai", "lid": "Nyindrou", "lie": "Likila", "lif": "Limbu", "lig": "Ligbi", "lih": "Lihir", "lij": "Ligurian", "lik": "Lika", "lil": "Lillooet", "lio": "Liki", "lip": "Sekpele", "liq": "Libido", "lir": "Liberian English", "lis": "Lisu", "liu": "Logorik", "liv": "Liv", "liw": "Col", "lix": "Liabuku", "liy": "Banda-Bambari", "liz": "Libinza", "lja": "Golpa", "lje": "Rampi", "lji": "Laiyolo", "ljl": "Li'o", "ljp": "Lampung Api", "ljw": "Yirandali", "ljx": "Yuru", "lka": "Lakalei", "lkb": "Kabras; Lukabaras", "lkc": "Kucong", "lkd": "Lakondê", "lke": "Kenyi", "lkh": "Lakha", "lki": "Laki", "lkj": "Remun", "lkl": "Laeko-Libuat", "lkm": "Kalaamaya", "lkn": "Lakon; Vure", "lko": "Khayo; Olukhayo", "lkr": "Päri", "lks": "Kisa; Olushisa", "lkt": "Lakota", "lku": "Kungkari", "lky": "Lokoya", "lla": "Lala-Roba", "llb": "Lolo", "llc": "Lele (Guinea)", "lld": "Ladin", "lle": "Lele (Papua New Guinea)", "llf": "Hermit", "llg": "Lole", "llh": "Lamu", "lli": "Teke-Laali", "llj": "Ladji Ladji", "llk": "Lelak", "lll": "Lilau", "llm": "Lasalimu", "lln": "Lele (Chad)", "llp": "North Efate", "llq": "Lolak", "lls": "Lithuanian Sign Language", "llu": "Lau", "llx": "Lauan", "lma": "East Limba", "lmb": "Merei", "lmc": "Limilngan", "lmd": "Lumun", "lme": "Pévé", "lmf": "South Lembata", "lmg": "Lamogai", "lmh": "Lambichhong", "lmi": "Lombi", "lmj": "West Lembata", "lmk": "Lamkang", "lml": "Hano", "lmn": "Lambadi", "lmo": "Lombard", "lmp": "Limbum", "lmq": "Lamatuka", "lmr": "Lamalera", "lmu": "Lamenu", "lmv": "Lomaiviti", "lmw": "Lake Miwok", "lmx": "Laimbue", "lmy": "Lamboya", "ln": "Lingala", "lna": "Langbashe", "lnb": "Mbalanhu", "lnd": "Lundayeh; Lun Bawang", "lng": "Langobardic", "lnh": "Lanoh", "lni": "Daantanai'", "lnj": "Leningitij", "lnl": "South Central Banda", "lnm": "Langam", "lnn": "Lorediakarkar", "lns": "Lamnso'", "lnu": "Longuda", "lnw": "Lanima", "lnz": "Lonzo", "lo": "Lao", "loa": "Loloda", "lob": "Lobi", "loc": "Inonhan", "loe": "Saluan", "lof": "Logol", "log": "Logo", "loh": "Narim", "loi": "Loma (Côte d'Ivoire)", "loj": "Lou", "lok": "Loko", "lol": "Mongo", "lom": "Loma (Liberia)", "lon": "Malawi Lomwe", "loo": "Lombo", "lop": "Lopa", "loq": "Lobala", "lor": "Téén", "los": "Loniu", "lot": "Otuho", "lou": "Louisiana Creole", "lov": "Lopi", "low": "Tampias Lobu", "lox": "Loun", "loy": "Loke", "loz": "Lozi", "lpa": "Lelepa", "lpe": "Lepki", "lpn": "Long Phuri Naga", "lpo": "Lipo", "lpx": "Lopit", "lqr": "Logir", "lra": "Rara Bakati'", "lrc": "Northern Luri", "lre": "Laurentian", "lrg": "Laragia", "lri": "Marachi; Olumarachi", "lrk": "Loarki", "lrl": "Lari", "lrm": "Marama; Olumarama", "lrn": "Lorang", "lro": "Laro", "lrr": "Southern Yamphu", "lrt": "Larantuka Malay", "lrv": "Larevat", "lrz": "Lemerig", "lsa": "Lasgerdi", "lsb": "Burundian Sign Language; Langue des Signes Burundaise", "lsc": "Albarradas Sign Language; Lengua de señas Albarradas", "lsd": "Lishana Deni", "lse": "Lusengo", "lsh": "Lish", "lsi": "Lashi", "lsl": "Latvian Sign Language", "lsm": "Saamia; Olusamia", "lsn": "Tibetan Sign Language", "lso": "Laos Sign Language", 
"lsp": "Panamanian Sign Language; Lengua de Señas Panameñas", "lsr": "Aruop", "lss": "Lasi", "lst": "Trinidad and Tobago Sign Language", "lsv": "Sivia Sign Language", "lsw": "Seychelles Sign Language; Lalang Siny Seselwa; Langue des Signes Seychelloise", "lsy": "Mauritian Sign Language", "lt": "Lithuanian", "ltc": "Late Middle Chinese", "ltg": "Latgalian", "lth": "Thur", "lti": "Leti (Indonesia)", "ltn": "Latundê", "lto": "Tsotso; Olutsotso", "lts": "Tachoni; Lutachoni", "ltu": "Latu", "lu": "Luba-Katanga", "lua": "Luba-Lulua", "luc": "Aringa", "lud": "Ludian", "lue": "Luvale", "luf": "Laua", "lui": "Luiseno", "luj": "Luna", "luk": "Lunanakha", "lul": "Olu'bo", "lum": "Luimbi", "lun": "Lunda", "luo": "Luo (Kenya and Tanzania); Dholuo", "lup": "Lumbu", "luq": "Lucumi", "lur": "Laura", "lus": "Lushai", "lut": "Lushootseed", "luu": "Lumba-Yakkha", "luv": "Luwati", "luw": "Luo (Cameroon)", "luy": "Luyia; Oluluyia", "luz": "Southern Luri", "lv": "Latvian", "lva": "Maku'a", "lvi": "Lavi", "lvk": "Lavukaleve", "lvs": "Standard Latvian", "lvu": "Levuka", "lwa": "Lwalu", "lwe": "Lewo Eleng", "lwg": "Wanga; Oluwanga", "lwh": "White Lachi", "lwl": "Eastern Lawa", "lwm": "Laomian", "lwo": "Luwo", "lws": "Malawian Sign Language", "lwt": "Lewotobi", "lwu": "Lawu", "lww": "Lewo", "lxm": "Lakurumau", "lya": "Layakha", "lyg": "Lyngngam", "lyn": "Luyana", "lzh": "Literary Chinese", "lzl": "Litzlitz", "lzn": "Leinong Naga", "lzz": "Laz", "maa": "San Jerónimo Tecóatl Mazatec", "mab": "Yutanduchi Mixtec", "mad": "Madurese", "mae": "Bo-Rukul", "maf": "Mafa", "mag": "Magahi", "mai": "Maithili", "maj": "Jalapa De Díaz Mazatec", "mak": "Makasar", "mam": "Mam", "man": "Mandingo; Manding", "map": "Austronesian languages", "maq": "Chiquihuitlán Mazatec", "mas": "Masai", "mat": "San Francisco Matlatzinca", "mau": "Huautla Mazatec", "mav": "Sateré-Mawé", "maw": "Mampruli", "max": "North Moluccan Malay", "maz": "Central Mazahua", "mba": "Higaonon", "mbb": "Western Bukidnon Manobo", "mbc": "Macushi", "mbd": "Dibabawon Manobo", "mbe": "Molale", "mbf": "Baba Malay", "mbh": "Mangseng", "mbi": "Ilianen Manobo", "mbj": "Nadëb", "mbk": "Malol", "mbl": "Maxakalí", "mbm": "Ombamba", "mbn": "Macaguán", "mbo": "Mbo (Cameroon)", "mbp": "Malayo", "mbq": "Maisin", "mbr": "Nukak Makú", "mbs": "Sarangani Manobo", "mbt": "Matigsalug Manobo", "mbu": "Mbula-Bwazza", "mbv": "Mbulungish", "mbw": "Maring", "mbx": "Mari (East Sepik Province)", "mby": "Memoni", "mbz": "Amoltepec Mixtec", "mca": "Maca", "mcb": "Machiguenga", "mcc": "Bitur", "mcd": "Sharanahua", "mce": "Itundujia Mixtec", "mcf": "Matsés", "mcg": "Mapoyo", "mch": "Maquiritari", "mci": "Mese", "mcj": "Mvanip", "mck": "Mbunda", "mcl": "Macaguaje", "mcm": "Malaccan Creole Portuguese", "mcn": "Masana", "mco": "Coatlán Mixe", "mcp": "Makaa", "mcq": "Ese", "mcr": "Menya", "mcs": "Mambai", "mct": "Mengisa", "mcu": "Cameroon Mambila", "mcv": "Minanibai", "mcw": "Mawa (Chad)", "mcx": "Mpiemo", "mcy": "South Watut", "mcz": "Mawan", "mda": "Mada (Nigeria)", "mdb": "Morigi", "mdc": "Male (Papua New Guinea)", "mdd": "Mbum", "mde": "Maba (Chad)", "mdf": "Moksha", "mdg": "Massalat", "mdh": "Maguindanaon", "mdi": "Mamvu", "mdj": "Mangbetu", "mdk": "Mangbutu", "mdl": "Maltese Sign Language", "mdm": "Mayogo", "mdn": "Mbati", "mdp": "Mbala", "mdq": "Mbole", "mdr": "Mandar", "mds": "Maria (Papua New Guinea)", "mdt": "Mbere", "mdu": "Mboko", "mdv": "Santa Lucía Monteverde Mixtec", "mdw": "Mbosi", "mdx": "Dizin", "mdy": "Male (Ethiopia)", "mdz": "Suruí Do Pará", "mea": "Menka", "meb": "Ikobi", "mec": 
"Marra", "med": "Melpa", "mee": "Mengen", "mef": "Megam", "meh": "Southwestern Tlaxiaco Mixtec", "mei": "Midob", "mej": "Meyah", "mek": "Mekeo", "mel": "Central Melanau", "mem": "Mangala", "men": "Mende (Sierra Leone)", "meo": "Kedah Malay", "mep": "Miriwoong", "meq": "Merey", "mer": "Meru", "mes": "Masmaje", "met": "Mato", "meu": "Motu", "mev": "Mano", "mew": "Maaka", "mey": "Hassaniyya", "mez": "Menominee", "mfa": "Pattani Malay", "mfb": "Bangka", "mfc": "Mba", "mfd": "Mendankwe-Nkwen", "mfe": "Morisyen", "mff": "Naki", "mfg": "Mogofin", "mfh": "Matal", "mfi": "Wandala", "mfj": "Mefele", "mfk": "North Mofu", "mfl": "Putai", "mfm": "Marghi South", "mfn": "Cross River Mbembe", "mfo": "Mbe", "mfp": "Makassar Malay", "mfq": "Moba", "mfr": "Marrithiyel", "mfs": "Mexican Sign Language", "mft": "Mokerang", "mfu": "Mbwela", "mfv": "Mandjak", "mfw": "Mulaha", "mfx": "Melo", "mfy": "Mayo", "mfz": "Mabaan", "mg": "Malagasy", "mga": "Middle Irish (900-1200)", "mgb": "Mararit", "mgc": "Morokodo", "mgd": "Moru", "mge": "Mango", "mgf": "Maklew", "mgg": "Mpumpong", "mgh": "Makhuwa-Meetto", "mgi": "Lijili", "mgj": "Abureni", "mgk": "Mawes", "mgl": "Maleu-Kilenge", "mgm": "Mambae", "mgn": "Mbangi", "mgo": "Meta'", "mgp": "Eastern Magar", "mgq": "Malila", "mgr": "Mambwe-Lungu", "mgs": "Manda (Tanzania)", "mgt": "Mongol", "mgu": "Mailu", "mgv": "Matengo", "mgw": "Matumbi", "mgy": "Mbunga", "mgz": "Mbugwe", "mh": "Marshallese", "mha": "Manda (India)", "mhb": "Mahongwe", "mhc": "Mocho", "mhd": "Mbugu", "mhe": "Besisi; Mah Meri", "mhf": "Mamaa", "mhg": "Margu", "mhi": "Ma'di", "mhj": "Mogholi", "mhk": "Mungaka", "mhl": "Mauwake", "mhm": "Makhuwa-Moniga", "mhn": "Mócheno", "mho": "Mashi (Zambia)", "mhp": "Balinese Malay", "mhq": "Mandan", "mhr": "Eastern Mari", "mhs": "Buru (Indonesia)", "mht": "Mandahuaca", "mhu": "Digaro-Mishmi; Darang Deng", "mhw": "Mbukushu", "mhx": "Maru; Lhaovo", "mhy": "Ma'anyan", "mhz": "Mor (Mor Islands)", "mi": "Maori", "mia": "Miami", "mib": "Atatláhuca Mixtec", "mic": "Mi'kmaq; Micmac", "mid": "Mandaic", "mie": "Ocotepec Mixtec", "mif": "Mofu-Gudur", "mig": "San Miguel El Grande Mixtec", "mih": "Chayuco Mixtec", "mii": "Chigmecatitlán Mixtec", "mij": "Abar; Mungbam", "mik": "Mikasuki", "mil": "Peñoles Mixtec", "mim": "Alacatlatzala Mixtec", "min": "Minangkabau", "mio": "Pinotepa Nacional Mixtec", "mip": "Apasco-Apoala Mixtec", "miq": "Mískito", "mir": "Isthmus Mixe", "mit": "Southern Puebla Mixtec", "miu": "Cacaloxtepec Mixtec", "miw": "Akoye", "mix": "Mixtepec Mixtec", "miy": "Ayutla Mixtec", "miz": "Coatzospan Mixtec", "mjb": "Makalero", "mjc": "San Juan Colorado Mixtec", "mjd": "Northwest Maidu", "mje": "Muskum", "mjg": "Tu", "mjh": "Mwera (Nyasa)", "mji": "Kim Mun", "mjj": "Mawak", "mjk": "Matukar", "mjl": "Mandeali", "mjm": "Medebur", "mjn": "Ma (Papua New Guinea)", "mjo": "Malankuravan", "mjp": "Malapandaram", "mjq": "Malaryan", "mjr": "Malavedan", "mjs": "Miship", "mjt": "Sauria Paharia", "mju": "Manna-Dora", "mjv": "Mannan", "mjw": "Karbi", "mjx": "Mahali", "mjy": "Mahican", "mjz": "Majhi", "mk": "Macedonian", "mka": "Mbre", "mkb": "Mal Paharia", "mkc": "Siliput", "mke": "Mawchi", "mkf": "Miya", "mkg": "Mak (China)", "mkh": "Mon-Khmer languages", "mki": "Dhatki", "mkj": "Mokilese", "mkk": "Byep", "mkl": "Mokole", "mkm": "Moklen", "mkn": "Kupang Malay", "mko": "Mingang Doso", "mkp": "Moikodi", "mkq": "Bay Miwok", "mkr": "Malas", "mks": "Silacayoapan Mixtec", "mkt": "Vamale", "mku": "Konyanka Maninka", "mkv": "Mafea", "mkw": "Kituba (Congo)", "mkx": "Kinamiging Manobo", "mky": 
"East Makian", "mkz": "Makasae", "ml": "Malayalam", "mla": "Malo", "mlb": "Mbule", "mlc": "Cao Lan", "mle": "Manambu", "mlf": "Mal", "mlh": "Mape", "mli": "Malimpung", "mlj": "Miltu", "mlk": "Ilwana; Kiwilwana", "mll": "Malua Bay", "mlm": "Mulam", "mln": "Malango", "mlo": "Mlomp", "mlp": "Bargam", "mlq": "Western Maninkakan", "mlr": "Vame", "mls": "Masalit", "mlu": "To'abaita", "mlv": "Motlav; Mwotlap", "mlw": "Moloko", "mlx": "Malfaxal; Naha'ai", "mlz": "Malaynon", "mma": "Mama", "mmb": "Momina", "mmc": "Michoacán Mazahua", "mmd": "Maonan", "mme": "Mae", "mmf": "Mundat", "mmg": "North Ambrym", "mmh": "Mehináku", "mmi": "Musar", "mmj": "Majhwar", "mmk": "Mukha-Dora", "mml": "Man Met", "mmm": "Maii", "mmn": "Mamanwa", "mmo": "Mangga Buang", "mmp": "Siawi", "mmq": "Musak", "mmr": "Western Xiangxi Miao", "mmt": "Malalamai", "mmu": "Mmaala", "mmv": "Miriti", "mmw": "Emae", "mmx": "Madak", "mmy": "Migaama", "mmz": "Mabaale", "mn": "Mongolian", "mna": "Mbula", "mnb": "Muna", "mnc": "Manchu", "mnd": "Mondé", "mne": "Naba", "mnf": "Mundani", "mng": "Eastern Mnong", "mnh": "Mono (Democratic Republic of Congo)", "mni": "Manipuri", "mnj": "Munji", "mnk": "Mandinka", "mnl": "Tiale", "mnm": "Mapena", "mnn": "Southern Mnong", "mno": "Manobo languages", "mnp": "Min Bei Chinese", "mnq": "Minriq", "mnr": "Mono (USA)", "mns": "Mansi", "mnu": "Mer", "mnv": "Rennell-Bellona", "mnw": "Mon", "mnx": "Manikion", "mny": "Manyawa", "mnz": "Moni", "moa": "Mwan", "moc": "Mocoví", "mod": "Mobilian", "moe": "Innu; Montagnais", "mog": "Mongondow", "moh": "Mohawk", "moi": "Mboi", "moj": "Monzombo", "mok": "Morori", "mom": "Mangue", "moo": "Monom", "mop": "Mopán Maya", "moq": "Mor (Bomberai Peninsula)", "mor": "Moro", "mos": "Mossi", "mot": "Barí", "mou": "Mogum", "mov": "Mohave", "mow": "Moi (Congo)", "mox": "Molima", "moy": "Shekkacho", "moz": "Mukulu; Gergiko", "mpa": "Mpoto", "mpb": "Malak Malak; Mullukmulluk", "mpc": "Mangarrayi", "mpd": "Machinere", "mpe": "Majang", "mpg": "Marba", "mph": "Maung", "mpi": "Mpade", "mpj": "Martu Wangka; Wangkajunga", "mpk": "Mbara (Chad)", "mpl": "Middle Watut", "mpm": "Yosondúa Mixtec", "mpn": "Mindiri", "mpo": "Miu", "mpp": "Migabac", "mpq": "Matís", "mpr": "Vangunu", "mps": "Dadibi", "mpt": "Mian", "mpu": "Makuráp", "mpv": "Mungkip", "mpw": "Mapidian", "mpx": "Misima-Panaeati", "mpy": "Mapia", "mpz": "Mpi", "mqa": "Maba (Indonesia)", "mqb": "Mbuko", "mqc": "Mangole", "mqe": "Matepi", "mqf": "Momuna", "mqg": "Kota Bangun Kutai Malay", "mqh": "Tlazoyaltepec Mixtec", "mqi": "Mariri", "mqj": "Mamasa", "mqk": "Rajah Kabunsuwan Manobo", "mql": "Mbelime", "mqm": "South Marquesan", "mqn": "Moronene", "mqo": "Modole", "mqp": "Manipa", "mqq": "Minokok", "mqr": "Mander", "mqs": "West Makian", "mqt": "Mok", "mqu": "Mandari", "mqv": "Mosimo", "mqw": "Murupi", "mqx": "Mamuju", "mqy": "Manggarai", "mqz": "Pano", "mr": "Marathi", "mra": "Mlabri", "mrb": "Marino", "mrc": "Maricopa", "mrd": "Western Magar", "mre": "Martha's Vineyard Sign Language", "mrf": "Elseng", "mrg": "Mising", "mrh": "Mara Chin", "mrj": "Western Mari", "mrk": "Hmwaveke", "mrl": "Mortlockese", "mrm": "Merlav; Mwerlap", "mrn": "Cheke Holo", "mro": "Mru", "mrp": "Morouas", "mrq": "North Marquesan", "mrr": "Maria (India)", "mrs": "Maragus", "mrt": "Marghi Central", "mru": "Mono (Cameroon)", "mrv": "Mangareva", "mrw": "Maranao", "mrx": "Maremgi; Dineor", "mry": "Mandaya", "mrz": "Marind", "ms": "Malay (macrolanguage)", "msb": "Masbatenyo", "msc": "Sankaran Maninka", "msd": "Yucatec Maya Sign Language", "mse": "Musey", "msf": 
"Mekwei", "msg": "Moraid", "msh": "Masikoro Malagasy", "msi": "Sabah Malay", "msj": "Ma (Democratic Republic of Congo)", "msk": "Mansaka", "msl": "Molof; Poule", "msm": "Agusan Manobo", "msn": "Vurës", "mso": "Mombum", "msp": "Maritsauá", "msq": "Caac", "msr": "Mongolian Sign Language", "mss": "West Masela", "msu": "Musom", "msv": "Maslam", "msw": "Mansoanka", "msx": "Moresada", "msy": "Aruamu", "msz": "Momare", "mt": "Maltese", "mta": "Cotabato Manobo", "mtb": "Anyin Morofo", "mtc": "Munit", "mtd": "Mualang", "mte": "Mono (Solomon Islands)", "mtf": "Murik (Papua New Guinea)", "mtg": "Una", "mth": "Munggui", "mti": "Maiwa (Papua New Guinea)", "mtj": "Moskona", "mtk": "Mbe'", "mtl": "Montol", "mtm": "Mator", "mtn": "Matagalpa", "mto": "Totontepec Mixe", "mtp": "Wichí Lhamtés Nocten", "mtq": "Muong", "mtr": "Mewari", "mts": "Yora", "mtt": "Mota", "mtu": "Tututepec Mixtec", "mtv": "Asaro'o", "mtw": "Southern Binukidnon", "mtx": "Tidaá Mixtec", "mty": "Nabi", "mua": "Mundang", "mub": "Mubi", "muc": "Ajumbu", "mud": "Mednyj Aleut", "mue": "Media Lengua", "mug": "Musgu", "muh": "Mündü", "mui": "Musi", "muj": "Mabire", "muk": "Mugom", "mum": "Maiwala", "mun": "Munda languages", "muo": "Nyong", "mup": "Malvi", "muq": "Eastern Xiangxi Miao", "mur": "Murle", "mus": "Creek", "mut": "Western Muria", "muu": "Yaaku", "muv": "Muthuvan", "mux": "Bo-Ung", "muy": "Muyang", "muz": "Mursi", "mva": "Manam", "mvb": "Mattole", "mvd": "Mamboru", "mve": "Marwari (Pakistan)", "mvf": "Peripheral Mongolian", "mvg": "Yucuañe Mixtec", "mvh": "Mulgi", "mvi": "Miyako", "mvk": "Mekmek", "mvl": "Mbara (Australia)", "mvn": "Minaveha", "mvo": "Marovo", "mvp": "Duri", "mvq": "Moere", "mvr": "Marau", "mvs": "Massep", "mvt": "Mpotovoro", "mvu": "Marfa", "mvv": "Tagal Murut", "mvw": "Machinga", "mvx": "Meoswar", "mvy": "Indus Kohistani", "mvz": "Mesqan", "mwa": "Mwatebu", "mwb": "Juwal", "mwc": "Are", "mwe": "Mwera (Chimwera)", "mwf": "Murrinh-Patha", "mwg": "Aiklep", "mwh": "Mouk-Aria", "mwi": "Labo; Ninde", "mwk": "Kita Maninkakan", "mwl": "Mirandese", "mwm": "Sar", "mwn": "Nyamwanga", "mwo": "Central Maewo", "mwp": "Kala Lagaw Ya", "mwq": "Mün Chin", "mwr": "Marwari", "mws": "Mwimbi-Muthambi", "mwt": "Moken", "mwu": "Mittu", "mwv": "Mentawai", "mww": "Hmong Daw", "mwz": "Moingi", "mxa": "Northwest Oaxaca Mixtec", "mxb": "Tezoatlán Mixtec", "mxc": "Manyika", "mxd": "Modang", "mxe": "Mele-Fila", "mxf": "Malgbe", "mxg": "Mbangala", "mxh": "Mvuba", "mxi": "Mozarabic", "mxj": "Miju-Mishmi; Geman Deng", "mxk": "Monumbo", "mxl": "Maxi Gbe", "mxm": "Meramera", "mxn": "Moi (Indonesia)", "mxo": "Mbowe", "mxp": "Tlahuitoltepec Mixe", "mxq": "Juquila Mixe", "mxr": "Murik (Malaysia)", "mxs": "Huitepec Mixtec", "mxt": "Jamiltepec Mixtec", "mxu": "Mada (Cameroon)", "mxv": "Metlatónoc Mixtec", "mxw": "Namo", "mxx": "Mahou; Mawukakan", "mxy": "Southeastern Nochixtlán Mixtec", "mxz": "Central Masela", "my": "Burmese", "myb": "Mbay", "myc": "Mayeka", "mye": "Myene", "myf": "Bambassi", "myg": "Manta", "myh": "Makah", "myj": "Mangayat", "myk": "Mamara Senoufo", "myl": "Moma", "mym": "Me'en", "myn": "Mayan languages", "myo": "Anfillo", "myp": "Pirahã", "myr": "Muniche", "mys": "Mesmes", "myu": "Mundurukú", "myv": "Erzya", "myw": "Muyuw", "myx": "Masaaba", "myy": "Macuna", "myz": "Classical Mandaic", "mza": "Santa María Zacatepec Mixtec", "mzb": "Tumzabt", "mzc": "Madagascar Sign Language", "mzd": "Malimba", "mze": "Morawa", "mzg": "Monastic Sign Language", "mzh": "Wichí Lhamtés Güisnay", "mzi": "Ixcatlán Mazatec", "mzj": "Manya", "mzk": "Nigeria 
Mambila", "mzl": "Mazatlán Mixe", "mzm": "Mumuye", "mzn": "Mazanderani", "mzo": "Matipuhy", "mzp": "Movima", "mzq": "Mori Atas", "mzr": "Marúbo", "mzs": "Macanese", "mzt": "Mintil", "mzu": "Inapang", "mzv": "Manza", "mzw": "Deg", "mzx": "Mawayana", "mzy": "Mozambican Sign Language", "mzz": "Maiadomu", "na": "Nauru", "naa": "Namla", "nab": "Southern Nambikuára", "nac": "Narak", "nae": "Naka'ela", "naf": "Nabak", "nag": "Naga Pidgin", "nah": "Nahuatl languages", "nai": "North American Indian languages", "naj": "Nalu", "nak": "Nakanai", "nal": "Nalik", "nam": "Ngan'gityemerri", "nan": "Min Nan Chinese", "nao": "Naaba", "nap": "Neapolitan", "naq": "Khoekhoe; Nama (Namibia)", "nar": "Iguta", "nas": "Naasioi", "nat": "Ca̱hungwa̱rya̱; Hungworo", "naw": "Nawuri", "nax": "Nakwi", "nay": "Ngarrindjeri", "naz": "Coatepec Nahuatl", "nb": "Norwegian Bokmål", "nba": "Nyemba", "nbb": "Ndoe", "nbc": "Chang Naga", "nbd": "Ngbinda", "nbe": "Konyak Naga", "nbg": "Nagarchal", "nbh": "Ngamo", "nbi": "Mao Naga", "nbj": "Ngarinyman", "nbk": "Nake", "nbm": "Ngbaka Ma'bo", "nbn": "Kuri", "nbo": "Nkukoli", "nbp": "Nnam", "nbq": "Nggem", "nbr": "Numana", "nbs": "Namibian Sign Language", "nbt": "Na", "nbu": "Rongmei Naga", "nbv": "Ngamambo", "nbw": "Southern Ngbandi", "nby": "Ningera", "nca": "Iyo", "ncb": "Central Nicobarese", "ncc": "Ponam", "ncd": "Nachering", "nce": "Yale", "ncf": "Notsi", "ncg": "Nisga'a", "nch": "Central Huasteca Nahuatl", "nci": "Classical Nahuatl", "ncj": "Northern Puebla Nahuatl", "nck": "Na-kara", "ncl": "Michoacán Nahuatl", "ncm": "Nambo", "ncn": "Nauna", "nco": "Sibe", "ncq": "Northern Katang", "ncr": "Ncane", "ncs": "Nicaraguan Sign Language", "nct": "Chothe Naga", "ncu": "Chumburung", "ncx": "Central Puebla Nahuatl", "ncz": "Natchez", "nd": "North Ndebele", "nda": "Ndasa", "ndb": "Kenswei Nsei", "ndc": "Ndau", "ndd": "Nde-Nsele-Nta", "ndf": "Nadruvian", "ndg": "Ndengereko", "ndh": "Ndali", "ndi": "Samba Leko", "ndj": "Ndamba", "ndk": "Ndaka", "ndl": "Ndolo", "ndm": "Ndam", "ndn": "Ngundi", "ndp": "Ndo", "ndq": "Ndombe", "ndr": "Ndoola", "nds": "Low German; Low Saxon", "ndt": "Ndunga", "ndu": "Dugun", "ndv": "Ndut", "ndw": "Ndobo", "ndx": "Nduga", "ndy": "Lutos", "ndz": "Ndogo", "ne": "Nepali (macrolanguage)", "nea": "Eastern Ngad'a", "neb": "Toura (Côte d'Ivoire)", "nec": "Nedebang", "ned": "Nde-Gbite", "nee": "Nêlêmwa-Nixumwak", "nef": "Nefamese", "neg": "Negidal", "neh": "Nyenkha", "nei": "Neo-Hittite", "nej": "Neko", "nek": "Neku", "nem": "Nemi", "nen": "Nengone", "neo": "Ná-Meo", "neq": "North Central Mixe", "ner": "Yahadian", "nes": "Bhoti Kinnauri", "net": "Nete", "neu": "Neo", "nev": "Nyaheun", "new": "Newari; Nepal Bhasa", "nex": "Neme", "ney": "Neyo", "nez": "Nez Perce", "nfa": "Dhao", "nfd": "Ahwai", "nfl": "Ayiwo; Äiwoo", "nfr": "Nafaanra", "nfu": "Mfumte", "ng": "Ndonga", "nga": "Ngbaka", "ngb": "Northern Ngbandi", "ngc": "Ngombe (Democratic Republic of Congo)", "ngd": "Ngando (Central African Republic)", "nge": "Ngemba", "ngf": "Trans-New Guinea languages", "ngg": "Ngbaka Manza", "ngh": "Nǁng", "ngi": "Ngizim", "ngj": "Ngie", "ngk": "Dalabon", "ngl": "Lomwe", "ngm": "Ngatik Men's Creole", "ngn": "Ngwo", "ngp": "Ngulu", "ngq": "Ngurimi; Ngoreme", "ngr": "Engdewu", "ngs": "Gvoko", "ngt": "Kriang; Ngeq", "ngu": "Guerrero Nahuatl", "ngv": "Nagumi", "ngw": "Ngwaba", "ngx": "Nggwahyi", "ngy": "Tibea", "ngz": "Ngungwel", "nha": "Nhanda", "nhb": "Beng", "nhc": "Tabasco Nahuatl", "nhd": "Chiripá; Ava Guaraní", "nhe": "Eastern Huasteca Nahuatl", "nhf": "Nhuwala", "nhg": "Tetelcingo 
Nahuatl", "nhh": "Nahari", "nhi": "Zacatlán-Ahuacatlán-Tepetzintla Nahuatl", "nhk": "Isthmus-Cosoleacaque Nahuatl", "nhm": "Morelos Nahuatl", "nhn": "Central Nahuatl", "nho": "Takuu", "nhp": "Isthmus-Pajapan Nahuatl", "nhq": "Huaxcaleca Nahuatl", "nhr": "Naro", "nht": "Ometepec Nahuatl", "nhu": "Noone", "nhv": "Temascaltepec Nahuatl", "nhw": "Western Huasteca Nahuatl", "nhx": "Isthmus-Mecayapan Nahuatl", "nhy": "Northern Oaxaca Nahuatl", "nhz": "Santa María La Alta Nahuatl", "nia": "Nias", "nib": "Nakame", "nic": "Niger-Kordofanian languages", "nid": "Ngandi", "nie": "Niellim", "nif": "Nek", "nig": "Ngalakgan", "nih": "Nyiha (Tanzania)", "nii": "Nii", "nij": "Ngaju", "nik": "Southern Nicobarese", "nil": "Nila", "nim": "Nilamba", "nin": "Ninzo", "nio": "Nganasan", "niq": "Nandi", "nir": "Nimboran", "nis": "Nimi", "nit": "Southeastern Kolami", "niu": "Niuean", "niv": "Gilyak", "niw": "Nimo", "nix": "Hema", "niy": "Ngiti", "niz": "Ningil", "nja": "Nzanyi", "njb": "Nocte Naga", "njd": "Ndonde Hamba", "njh": "Lotha Naga", "nji": "Gudanji", "njj": "Njen", "njl": "Njalgulgule", "njm": "Angami Naga", "njn": "Liangmai Naga", "njo": "Ao Naga", "njr": "Njerep", "njs": "Nisa", "njt": "Ndyuka-Trio Pidgin", "nju": "Ngadjunmaya", "njx": "Kunyi", "njy": "Njyem", "njz": "Nyishi", "nka": "Nkoya", "nkb": "Khoibu Naga", "nkc": "Nkongho", "nkd": "Koireng", "nke": "Duke", "nkf": "Inpui Naga", "nkg": "Nekgini", "nkh": "Khezha Naga", "nki": "Thangal Naga", "nkj": "Nakai", "nkk": "Nokuku", "nkm": "Namat", "nkn": "Nkangala", "nko": "Nkonya", "nkp": "Niuatoputapu", "nkq": "Nkami", "nkr": "Nukuoro", "nks": "North Asmat", "nkt": "Nyika (Tanzania)", "nku": "Bouna Kulango", "nkv": "Nyika (Malawi and Zambia)", "nkw": "Nkutu", "nkx": "Nkoroo", "nkz": "Nkari", "nl": "Dutch; Flemish", "nla": "Ngombale", "nlc": "Nalca", "nle": "East Nyala", "nlg": "Gela", "nli": "Grangali", "nlj": "Nyali", "nlk": "Ninia Yali", "nll": "Nihali", "nlm": "Mankiyali", "nlo": "Ngul", "nlq": "Lao Naga", "nlu": "Nchumbulu", "nlv": "Orizaba Nahuatl", "nlw": "Walangama", "nlx": "Nahali", "nly": "Nyamal", "nlz": "Nalögo", "nma": "Maram Naga", "nmb": "Big Nambas; V'ënen Taut", "nmc": "Ngam", "nmd": "Ndumu", "nme": "Mzieme Naga", "nmf": "Tangkhul Naga (India)", "nmg": "Kwasio", "nmh": "Monsang Naga", "nmi": "Nyam", "nmj": "Ngombe (Central African Republic)", "nmk": "Namakura", "nml": "Ndemli", "nmm": "Manangba", "nmn": "ǃXóõ", "nmo": "Moyon Naga", "nmp": "Nimanbur", "nmq": "Nambya", "nmr": "Nimbari", "nms": "Letemboi", "nmt": "Namonuito", "nmu": "Northeast Maidu", "nmv": "Ngamini", "nmw": "Nimoa; Rifao", "nmx": "Nama (Papua New Guinea)", "nmy": "Namuyi", "nmz": "Nawdm", "nn": "Norwegian Nynorsk", "nna": "Nyangumarta", "nnb": "Nande", "nnc": "Nancere", "nnd": "West Ambae", "nne": "Ngandyera", "nnf": "Ngaing", "nng": "Maring Naga", "nnh": "Ngiemboon", "nni": "North Nuaulu", "nnj": "Nyangatom", "nnk": "Nankina", "nnl": "Northern Rengma Naga", "nnm": "Namia", "nnn": "Ngete", "nnp": "Wancho Naga", "nnq": "Ngindo", "nnr": "Narungga", "nnt": "Nanticoke", "nnu": "Dwang", "nnv": "Nugunu (Australia)", "nnw": "Southern Nuni", "nny": "Nyangga", "nnz": "Nda'nda'", "no": "Norwegian", "noa": "Woun Meu", "noc": "Nuk", "nod": "Northern Thai", "noe": "Nimadi", "nof": "Nomane", "nog": "Nogai", "noh": "Nomu", "noi": "Noiri", "noj": "Nonuya", "nok": "Nooksack", "nol": "Nomlaki", "nom": "Nocamán", "non": "Old Norse", "nop": "Numanggang", "noq": "Ngongo", "nos": "Eastern Nisu", "not": "Nomatsiguenga", "nou": "Ewage-Notu", "nov": "Novial", "now": "Nyambo", "noy": "Noy", "noz": 
"Nayi", "npa": "Nar Phu", "npb": "Nupbikha", "npg": "Ponyo-Gongwang Naga", "nph": "Phom Naga", "npi": "Nepali (individual language)", "npl": "Southeastern Puebla Nahuatl", "npn": "Mondropolon", "npo": "Pochuri Naga", "nps": "Nipsan", "npu": "Puimei Naga", "npx": "Noipx", "npy": "Napu", "nqg": "Southern Nago", "nqk": "Kura Ede Nago", "nql": "Ngendelengo", "nqm": "Ndom", "nqn": "Nen", "nqo": "N'Ko; N’Ko", "nqq": "Kyan-Karyaw Naga", "nqt": "Nteng", "nqy": "Akyaung Ari Naga", "nr": "South Ndebele", "nra": "Ngom", "nrb": "Nara", "nrc": "Noric", "nre": "Southern Rengma Naga", "nrf": "Jèrriais; Guernésiais", "nrg": "Narango", "nri": "Chokri Naga", "nrk": "Ngarla", "nrl": "Ngarluma", "nrm": "Narom", "nrn": "Norn", "nrp": "North Picene", "nrr": "Norra; Nora", "nrt": "Northern Kalapuya", "nru": "Narua", "nrx": "Ngurmbur", "nrz": "Lala", "nsa": "Sangtam Naga", "nsb": "Lower Nossob", "nsc": "Nshi", "nsd": "Southern Nisu", "nse": "Nsenga", "nsf": "Northwestern Nisu", "nsg": "Ngasa", "nsh": "Ngoshie", "nsi": "Nigerian Sign Language", "nsk": "Naskapi", "nsl": "Norwegian Sign Language", "nsm": "Sumi Naga", "nsn": "Nehan", "nso": "Pedi; Northern Sotho; Sepedi", "nsp": "Nepalese Sign Language", "nsq": "Northern Sierra Miwok", "nsr": "Maritime Sign Language", "nss": "Nali", "nst": "Tase Naga", "nsu": "Sierra Negra Nahuatl", "nsv": "Southwestern Nisu", "nsw": "Navut", "nsx": "Nsongo", "nsy": "Nasal", "nsz": "Nisenan", "ntd": "Northern Tidung", "nte": "Nathembo", "ntg": "Ngantangarra", "nti": "Natioro", "ntj": "Ngaanyatjarra", "ntk": "Ikoma-Nata-Isenye", "ntm": "Nateni", "nto": "Ntomba", "ntp": "Northern Tepehuan", "ntr": "Delo", "ntu": "Natügu", "ntw": "Nottoway", "ntx": "Tangkhul Naga (Myanmar)", "nty": "Mantsi", "ntz": "Natanzi", "nua": "Yuanga", "nub": "Nubian languages", "nuc": "Nukuini", "nud": "Ngala", "nue": "Ngundu", "nuf": "Nusu", "nug": "Nungali", "nuh": "Ndunda", "nui": "Ngumbi", "nuj": "Nyole", "nuk": "Nuu-chah-nulth; Nuuchahnulth", "nul": "Nusa Laut", "num": "Niuafo'ou", "nun": "Anong", "nuo": "Nguôn", "nup": "Nupe-Nupe-Tako", "nuq": "Nukumanu", "nur": "Nukuria", "nus": "Nuer", "nut": "Nung (Viet Nam)", "nuu": "Ngbundu", "nuv": "Northern Nuni", "nuw": "Nguluwan", "nux": "Mehek", "nuy": "Nunggubuyu", "nuz": "Tlamacazapa Nahuatl", "nv": "Navajo; Navaho", "nvh": "Nasarian", "nvm": "Namiae", "nvo": "Nyokon", "nwa": "Nawathinehena", "nwb": "Nyabwa", "nwc": "Classical Newari; Classical Nepal Bhasa; Old Newari", "nwe": "Ngwe", "nwg": "Ngayawung", "nwi": "Southwest Tanna", "nwm": "Nyamusa-Molo", "nwo": "Nauo", "nwr": "Nawaru", "nww": "Ndwewe", "nwx": "Middle Newar", "nwy": "Nottoway-Meherrin", "nxa": "Nauete", "nxd": "Ngando (Democratic Republic of Congo)", "nxe": "Nage", "nxg": "Ngad'a", "nxi": "Nindi", "nxk": "Koki Naga", "nxl": "South Nuaulu", "nxm": "Numidian", "nxn": "Ngawun", "nxo": "Ndambomo", "nxq": "Naxi", "nxr": "Ninggerum", "nxx": "Nafri", "ny": "Nyanja; Chewa; Chichewa", "nyb": "Nyangbo", "nyc": "Nyanga-li", "nyd": "Nyore; Olunyole", "nye": "Nyengo", "nyf": "Giryama; Kigiryama", "nyg": "Nyindu", "nyh": "Nyikina", "nyi": "Ama (Sudan)", "nyj": "Nyanga", "nyk": "Nyaneka", "nyl": "Nyeu", "nym": "Nyamwezi", "nyn": "Nyankole", "nyo": "Nyoro", "nyp": "Nyang'i", "nyq": "Nayini", "nyr": "Nyiha (Malawi)", "nys": "Nyungar", "nyt": "Nyawaygi", "nyu": "Nyungwe", "nyv": "Nyulnyul", "nyw": "Nyaw", "nyx": "Nganyaywana", "nyy": "Nyakyusa-Ngonde", "nza": "Tigon Mbembe", "nzb": "Njebi", "nzd": "Nzadi", "nzi": "Nzima", "nzk": "Nzakara", "nzm": "Zeme Naga", "nzs": "New Zealand Sign Language", "nzu": 
"Teke-Nzikou", "nzy": "Nzakambay", "nzz": "Nanga Dama Dogon", "oaa": "Orok", "oac": "Oroch", "oar": "Old Aramaic (up to 700 BCE); Ancient Aramaic (up to 700 BCE)", "oav": "Old Avar", "obi": "Obispeño", "obk": "Southern Bontok", "obl": "Oblo", "obm": "Moabite", "obo": "Obo Manobo", "obr": "Old Burmese", "obt": "Old Breton", "obu": "Obulom", "oc": "Occitan (post 1500)", "oca": "Ocaina", "och": "Old Chinese", "ocm": "Old Cham", "oco": "Old Cornish", "ocu": "Atzingo Matlatzinca", "oda": "Odut", "odk": "Od", "odt": "Old Dutch", "odu": "Odual", "ofo": "Ofo", "ofs": "Old Frisian", "ofu": "Efutop", "ogb": "Ogbia", "ogc": "Ogbah", "oge": "Old Georgian", "ogg": "Ogbogolo", "ogo": "Khana", "ogu": "Ogbronuagum", "oht": "Old Hittite", "ohu": "Old Hungarian", "oia": "Oirata", "oie": "Okolie", "oin": "Inebu One", "oj": "Ojibwa", "ojb": "Northwestern Ojibwa", "ojc": "Central Ojibwa", "ojg": "Eastern Ojibwa", "ojp": "Old Japanese", "ojs": "Severn Ojibwa", "ojv": "Ontong Java", "ojw": "Western Ojibwa", "oka": "Okanagan", "okb": "Okobo", "okc": "Kobo", "okd": "Okodia", "oke": "Okpe (Southwestern Edo)", "okg": "Koko Babangk", "okh": "Koresh-e Rostam", "oki": "Okiek", "okj": "Oko-Juwoi", "okk": "Kwamtim One", "okl": "Old Kentish Sign Language", "okm": "Middle Korean (10th-16th cent.)", "okn": "Oki-No-Erabu", "oko": "Old Korean (3rd-9th cent.)", "okr": "Kirike", "oks": "Oko-Eni-Osayen", "oku": "Oku", "okv": "Orokaiva", "okx": "Okpe (Northwestern Edo)", "okz": "Old Khmer", "ola": "Walungge", "old": "Mochi", "ole": "Olekha", "olk": "Olkol", "olm": "Oloma", "olo": "Livvi", "olr": "Olrat", "olt": "Old Lithuanian", "olu": "Kuvale", "om": "Oromo", "oma": "Omaha-Ponca", "omb": "East Ambae", "omc": "Mochica", "omg": "Omagua", "omi": "Omi", "omk": "Omok", "oml": "Ombo", "omn": "Minoan", "omo": "Utarmbung", "omp": "Old Manipuri", "omq": "Oto-Manguean languages", "omr": "Old Marathi", "omt": "Omotik", "omu": "Omurano", "omv": "Omotic languages", "omw": "South Tairora", "omx": "Old Mon", "omy": "Old Malay", "ona": "Ona", "onb": "Lingao", "one": "Oneida", "ong": "Olo", "oni": "Onin", "onj": "Onjob", "onk": "Kabore One", "onn": "Onobasulu", "ono": "Onondaga", "onp": "Sartang", "onr": "Northern One", "ons": "Ono", "ont": "Ontenu", "onu": "Unua", "onw": "Old Nubian", "onx": "Onin Based Pidgin", "ood": "Tohono O'odham", "oog": "Ong", "oon": "Önge", "oor": "Oorlams", "oos": "Old Ossetic", "opa": "Okpamheri", "opk": "Kopkaka", "opm": "Oksapmin", "opo": "Opao", "opt": "Opata", "opy": "Ofayé", "or": "Oriya (macrolanguage); Odia (macrolanguage)", "ora": "Oroha", "orc": "Orma", "ore": "Orejón", "org": "Oring", "orh": "Oroqen", "orn": "Orang Kanaq", "oro": "Orokolo", "orr": "Oruma", "ors": "Orang Seletar", "ort": "Adivasi Oriya", "oru": "Ormuri", "orv": "Old Russian", "orw": "Oro Win", "orx": "Oro", "ory": "Odia (individual language); Oriya (individual language)", "orz": "Ormu", "os": "Ossetian; Ossetic", "osa": "Osage", "osc": "Oscan", "osi": "Osing", "osn": "Old Sundanese", "oso": "Ososo", "osp": "Old Spanish", "ost": "Osatu", "osu": "Southern One", "osx": "Old Saxon", "ota": "Ottoman Turkish (1500-1928)", "otb": "Old Tibetan", "otd": "Ot Danum", "ote": "Mezquital Otomi", "oti": "Oti", "otk": "Old Turkish", "otl": "Tilapa Otomi", "otm": "Eastern Highland Otomi", "otn": "Tenango Otomi", "oto": "Otomian languages", "otq": "Querétaro Otomi", "otr": "Otoro", "ots": "Estado de México Otomi", "ott": "Temoaya Otomi", "otu": "Otuke", "otw": "Ottawa", "otx": "Texcatepec Otomi", "oty": "Old Tamil", "otz": "Ixtenco Otomi", "oua": "Tagargrent", 
"oub": "Glio-Oubi", "oue": "Oune", "oui": "Old Uighur", "oum": "Ouma", "ovd": "Elfdalian; Övdalian", "owi": "Owiniga", "owl": "Old Welsh", "oyb": "Oy", "oyd": "Oyda", "oym": "Wayampi", "oyy": "Oya'oya", "ozm": "Koonzime", "pa": "Panjabi; Punjabi", "paa": "Papuan languages", "pab": "Parecís", "pac": "Pacoh", "pad": "Paumarí", "pae": "Pagibete", "paf": "Paranawát", "pag": "Pangasinan", "pah": "Tenharim", "pai": "Pe", "pak": "Parakanã", "pal": "Pahlavi", "pam": "Pampanga; Kapampangan", "pao": "Northern Paiute", "pap": "Papiamento", "paq": "Parya", "par": "Panamint; Timbisha", "pas": "Papasena", "pau": "Palauan", "pav": "Pakaásnovos", "paw": "Pawnee", "pax": "Pankararé", "pay": "Pech", "paz": "Pankararú", "pbb": "Páez", "pbc": "Patamona", "pbe": "Mezontla Popoloca", "pbf": "Coyotepec Popoloca", "pbg": "Paraujano", "pbh": "E'ñapa Woromaipu", "pbi": "Parkwa", "pbl": "Mak (Nigeria)", "pbm": "Puebla Mazatec", "pbn": "Kpasam", "pbo": "Papel", "pbp": "Badyara", "pbr": "Pangwa", "pbs": "Central Pame", "pbt": "Southern Pashto", "pbu": "Northern Pashto", "pbv": "Pnar", "pby": "Pyu (Papua New Guinea)", "pca": "Santa Inés Ahuatempan Popoloca", "pcb": "Pear", "pcc": "Bouyei", "pcd": "Picard", "pce": "Ruching Palaung", "pcf": "Paliyan", "pcg": "Paniya", "pch": "Pardhan", "pci": "Duruwa", "pcj": "Parenga", "pck": "Paite Chin", "pcl": "Pardhi", "pcm": "Nigerian Pidgin", "pcn": "Piti", "pcp": "Pacahuara", "pcw": "Pyapun", "pda": "Anam", "pdc": "Pennsylvania German", "pdi": "Pa Di", "pdn": "Podena; Fedan", "pdo": "Padoe", "pdt": "Plautdietsch", "pdu": "Kayan", "pea": "Peranakan Indonesian", "peb": "Eastern Pomo", "ped": "Mala (Papua New Guinea)", "pee": "Taje", "pef": "Northeastern Pomo", "peg": "Pengo", "peh": "Bonan", "pei": "Chichimeca-Jonaz", "pej": "Northern Pomo", "pek": "Penchal", "pel": "Pekal", "pem": "Phende", "peo": "Old Persian (ca. 
600-400 B.C.)", "pep": "Kunja", "peq": "Southern Pomo", "pes": "Iranian Persian", "pev": "Pémono", "pex": "Petats", "pey": "Petjo", "pez": "Eastern Penan", "pfa": "Pááfang", "pfe": "Pere", "pfl": "Pfaelzisch", "pga": "Sudanese Creole Arabic", "pgd": "Gāndhārī", "pgg": "Pangwali", "pgi": "Pagi", "pgk": "Rerep", "pgl": "Primitive Irish", "pgn": "Paelignian", "pgs": "Pangseng", "pgu": "Pagu", "pgz": "Papua New Guinean Sign Language", "pha": "Pa-Hng", "phd": "Phudagi", "phg": "Phuong", "phh": "Phukha", "phi": "Philippine languages", "phj": "Pahari", "phk": "Phake", "phl": "Phalura; Palula", "phm": "Phimbi", "phn": "Phoenician", "pho": "Phunoi", "phq": "Phana'", "phr": "Pahari-Potwari", "pht": "Phu Thai", "phu": "Phuan", "phv": "Pahlavani", "phw": "Phangduwali", "pi": "Pali", "pia": "Pima Bajo", "pib": "Yine", "pic": "Pinji", "pid": "Piaroa", "pie": "Piro", "pif": "Pingelapese", "pig": "Pisabo", "pih": "Pitcairn-Norfolk", "pij": "Pijao", "pil": "Yom", "pim": "Powhatan", "pin": "Piame", "pio": "Piapoco", "pip": "Pero", "pir": "Piratapuyo", "pis": "Pijin", "pit": "Pitta Pitta", "piu": "Pintupi-Luritja", "piv": "Pileni; Vaeakau-Taumako", "piw": "Pimbwe", "pix": "Piu", "piy": "Piya-Kwonci", "piz": "Pije", "pjt": "Pitjantjatjara", "pka": "Ardhamāgadhī Prākrit", "pkb": "Pokomo; Kipfokomo", "pkc": "Paekche", "pkg": "Pak-Tong", "pkh": "Pankhu", "pkn": "Pakanha", "pko": "Pökoot", "pkp": "Pukapuka", "pkr": "Attapady Kurumba", "pks": "Pakistan Sign Language", "pkt": "Maleng", "pku": "Paku", "pl": "Polish", "pla": "Miani", "plb": "Polonombauk", "plc": "Central Palawano", "pld": "Polari", "ple": "Palu'e", "plf": "Central Malayo-Polynesian languages", "plg": "Pilagá", "plh": "Paulohi", "plj": "Polci", "plk": "Kohistani Shina", "pll": "Shwe Palaung", "pln": "Palenquero", "plo": "Oluta Popoluca", "plq": "Palaic", "plr": "Palaka Senoufo", "pls": "San Marcos Tlacoyalco Popoloca; San Marcos Tlalcoyalco Popoloca", "plt": "Plateau Malagasy", "plu": "Palikúr", "plv": "Southwest Palawano", "plw": "Brooke's Point Palawano", "ply": "Bolyu", "plz": "Paluan", "pma": "Paama", "pmb": "Pambia", "pmd": "Pallanganmiddang", "pme": "Pwaamei", "pmf": "Pamona", "pmh": "Māhārāṣṭri Prākrit", "pmi": "Northern Pumi", "pmj": "Southern Pumi", "pmk": "Pamlico", "pml": "Lingua Franca", "pmm": "Pomo", "pmn": "Pam", "pmo": "Pom", "pmq": "Northern Pame", "pmr": "Paynamar", "pms": "Piemontese", "pmt": "Tuamotuan", "pmw": "Plains Miwok", "pmx": "Poumei Naga", "pmy": "Papuan Malay", "pmz": "Southern Pame", "pna": "Punan Bah-Biau", "pnb": "Western Panjabi", "pnc": "Pannei", "pnd": "Mpinda", "pne": "Western Penan", "png": "Pangu; Pongu", "pnh": "Penrhyn", "pni": "Aoheng", "pnj": "Pinjarup", "pnk": "Paunaka", "pnl": "Paleni", "pnm": "Punan Batu 1", "pnn": "Pinai-Hagahai", "pno": "Panobo", "pnp": "Pancana", "pnq": "Pana (Burkina Faso)", "pnr": "Panim", "pns": "Ponosakan", "pnt": "Pontic", "pnu": "Jiongnai Bunu", "pnv": "Pinigura", "pnw": "Banyjima; Panytyima", "pnx": "Phong-Kniang", "pny": "Pinyin", "pnz": "Pana (Central African Republic)", "poc": "Poqomam", "poe": "San Juan Atzingo Popoloca", "pof": "Poke", "pog": "Potiguára", "poh": "Poqomchi'", "poi": "Highland Popoluca", "pok": "Pokangá", "pom": "Southeastern Pomo", "pon": "Pohnpeian", "poo": "Central Pomo", "pop": "Pwapwâ", "poq": "Texistepec Popoluca", "pos": "Sayula Popoluca", "pot": "Potawatomi", "pov": "Upper Guinea Crioulo", "pow": "San Felipe Otlaltepec Popoloca", "pox": "Polabian", "poy": "Pogolo", "poz": "Malayo-Polynesian languages", "ppe": "Papi", "ppi": "Paipai", "ppk": "Uma", 
"ppl": "Pipil; Nicarao", "ppm": "Papuma", "ppn": "Papapana", "ppo": "Folopa", "ppp": "Pelende", "ppq": "Pei", "pps": "San Luís Temalacayuca Popoloca", "ppt": "Pare", "ppu": "Papora", "pqa": "Pa'a", "pqe": "Eastern Malayo-Polynesian languages", "pqm": "Malecite-Passamaquoddy", "pqw": "Western Malayo-Polynesian languages", "pra": "Prakrit languages", "prc": "Parachi", "prd": "Parsi-Dari", "pre": "Principense", "prf": "Paranan", "prg": "Prussian", "prh": "Porohanon", "pri": "Paicî", "prk": "Parauk", "prl": "Peruvian Sign Language", "prm": "Kibiri", "prn": "Prasuni", "pro": "Old Provençal (to 1500); Old Occitan (to 1500)", "prp": "Parsi", "prq": "Ashéninka Perené", "prr": "Puri", "prs": "Dari; Afghan Persian", "prt": "Phai", "pru": "Puragi", "prw": "Parawen", "prx": "Purik", "prz": "Providencia Sign Language", "ps": "Pushto; Pashto", "psa": "Asue Awyu", "psc": "Iranian Sign Language; Persian Sign Language", "psd": "Plains Indian Sign Language", "pse": "Central Malay", "psg": "Penang Sign Language", "psh": "Southwest Pashai; Southwest Pashayi", "psi": "Southeast Pashai; Southeast Pashayi", "psl": "Puerto Rican Sign Language", "psm": "Pauserna", "psn": "Panasuan", "pso": "Polish Sign Language", "psp": "Philippine Sign Language", "psq": "Pasi", "psr": "Portuguese Sign Language", "pss": "Kaulong", "pst": "Central Pashto", "psu": "Sauraseni Prākrit", "psw": "Port Sandwich", "psy": "Piscataway", "pt": "Portuguese", "pta": "Pai Tavytera", "pth": "Pataxó Hã-Ha-Hãe", "pti": "Pindiini; Wangkatha", "ptn": "Patani", "pto": "Zo'é", "ptp": "Patep", "ptq": "Pattapu", "ptr": "Piamatsina", "ptt": "Enrekang", "ptu": "Bambam", "ptv": "Port Vato", "ptw": "Pentlatch", "pty": "Pathiya", "pua": "Western Highland Purepecha", "pub": "Purum", "puc": "Punan Merap", "pud": "Punan Aput", "pue": "Puelche", "puf": "Punan Merah", "pug": "Phuie", "pui": "Puinave", "puj": "Punan Tubu", "pum": "Puma", "puo": "Puoc", "pup": "Pulabu", "puq": "Puquina", "pur": "Puruborá", "put": "Putoh", "puu": "Punu", "puw": "Puluwatese", "pux": "Puare", "puy": "Purisimeño", "pwa": "Pawaia", "pwb": "Panawa", "pwg": "Gapapaiwa", "pwi": "Patwin", "pwm": "Molbog", "pwn": "Paiwan", "pwo": "Pwo Western Karen", "pwr": "Powari", "pww": "Pwo Northern Karen", "pxm": "Quetzaltepec Mixe", "pye": "Pye Krumen", "pym": "Fyam", "pyn": "Poyanáwa", "pys": "Paraguayan Sign Language; Lengua de Señas del Paraguay", "pyu": "Puyuma", "pyx": "Pyu (Myanmar)", "pyy": "Pyen", "pzh": "Pazeh", "pzn": "Jejara Naga; Para Naga", "qu": "Quechua", "qua": "Quapaw", "qub": "Huallaga Huánuco Quechua", "quc": "K'iche'; Quiché", "qud": "Calderón Highland Quichua", "quf": "Lambayeque Quechua", "qug": "Chimborazo Highland Quichua", "quh": "South Bolivian Quechua", "qui": "Quileute", "quk": "Chachapoyas Quechua", "qul": "North Bolivian Quechua", "qum": "Sipacapense", "qun": "Quinault", "qup": "Southern Pastaza Quechua", "quq": "Quinqui", "qur": "Yanahuanca Pasco Quechua", "qus": "Santiago del Estero Quichua", "quv": "Sacapulteco", "quw": "Tena Lowland Quichua", "qux": "Yauyos Quechua", "quy": "Ayacucho Quechua", "quz": "Cusco Quechua", "qva": "Ambo-Pasco Quechua", "qvc": "Cajamarca Quechua", "qve": "Eastern Apurímac Quechua", "qvh": "Huamalíes-Dos de Mayo Huánuco Quechua", "qvi": "Imbabura Highland Quichua", "qvj": "Loja Highland Quichua", "qvl": "Cajatambo North Lima Quechua", "qvm": "Margos-Yarowilca-Lauricocha Quechua", "qvn": "North Junín Quechua", "qvo": "Napo Lowland Quechua", "qvp": "Pacaraos Quechua", "qvs": "San Martín Quechua", "qvw": "Huaylla Wanca Quechua", "qvy": "Queyu", 
"qvz": "Northern Pastaza Quichua", "qwa": "Corongo Ancash Quechua", "qwc": "Classical Quechua", "qwe": "Quechuan (family)", "qwh": "Huaylas Ancash Quechua", "qwm": "Kuman (Russia)", "qws": "Sihuas Ancash Quechua", "qwt": "Kwalhioqua-Tlatskanai", "qxa": "Chiquián Ancash Quechua", "qxc": "Chincha Quechua", "qxh": "Panao Huánuco Quechua", "qxl": "Salasaca Highland Quichua", "qxn": "Northern Conchucos Ancash Quechua", "qxo": "Southern Conchucos Ancash Quechua", "qxp": "Puno Quechua", "qxq": "Qashqa'i", "qxr": "Cañar Highland Quichua", "qxs": "Southern Qiang", "qxt": "Santa Ana de Tusi Pasco Quechua", "qxu": "Arequipa-La Unión Quechua", "qxw": "Jauja Wanca Quechua", "qya": "Quenya", "qyp": "Quiripi", "raa": "Dungmali", "rab": "Camling", "rac": "Rasawa", "rad": "Rade", "raf": "Western Meohang", "rag": "Logooli; Lulogooli", "rah": "Rabha", "rai": "Ramoaaina", "raj": "Rajasthani", "rak": "Tulu-Bohuai", "ral": "Ralte", "ram": "Canela", "ran": "Riantana", "rao": "Rao", "rap": "Rapanui", "raq": "Saam", "rar": "Rarotongan; Cook Islands Maori", "ras": "Tegali", "rat": "Razajerdi", "rau": "Raute", "rav": "Sampang", "raw": "Rawang", "rax": "Rang", "ray": "Rapa", "raz": "Rahambuu", "rbb": "Rumai Palaung", "rbk": "Northern Bontok", "rbl": "Miraya Bikol", "rbp": "Barababaraba", "rcf": "Réunion Creole French", "rdb": "Rudbari", "rea": "Rerau", "reb": "Rembong", "ree": "Rejang Kayan", "reg": "Kara (Tanzania)", "rei": "Reli", "rej": "Rejang", "rel": "Rendille", "rem": "Remo", "ren": "Rengao", "rer": "Rer Bare", "res": "Reshe", "ret": "Retta", "rey": "Reyesano", "rga": "Roria", "rge": "Romano-Greek", "rgk": "Rangkas", "rgn": "Romagnol", "rgr": "Resígaro", "rgs": "Southern Roglai", "rgu": "Ringgou", "rhg": "Rohingya", "rhp": "Yahang", "ria": "Riang (India)", "rib": "Bribri Sign Language", "rif": "Tarifit", "ril": "Riang Lang; Riang (Myanmar)", "rim": "Nyaturu", "rin": "Nungu", "rir": "Ribun", "rit": "Ritharrngu", "riu": "Riung", "rjg": "Rajong", "rji": "Raji", "rjs": "Rajbanshi", "rka": "Kraol", "rkb": "Rikbaktsa", "rkh": "Rakahanga-Manihiki", "rki": "Rakhine", "rkm": "Marka", "rkt": "Rangpuri; Kamta", "rkw": "Arakwal", "rm": "Romansh", "rma": "Rama", "rmb": "Rembarrnga", "rmc": "Carpathian Romani", "rmd": "Traveller Danish", "rme": "Angloromani", "rmf": "Kalo Finnish Romani", "rmg": "Traveller Norwegian", "rmh": "Murkim", "rmi": "Lomavren", "rmk": "Romkun", "rml": "Baltic Romani", "rmm": "Roma", "rmn": "Balkan Romani", "rmo": "Sinte Romani", "rmp": "Rempi", "rmq": "Caló", "rms": "Romanian Sign Language", "rmt": "Domari", "rmu": "Tavringer Romani", "rmv": "Romanova", "rmw": "Welsh Romani", "rmx": "Romam", "rmy": "Vlax Romani", "rmz": "Marma", "rn": "Rundi", "rnb": "Brunca Sign Language", "rnd": "Ruund", "rng": "Ronga", "rnl": "Ranglong", "rnn": "Roon", "rnp": "Rongpo", "rnr": "Nari Nari", "rnw": "Rungwa", "ro": "Romanian; Moldavian; Moldovan", "roa": "Romance languages", "rob": "Tae'", "roc": "Cacgia Roglai", "rod": "Rogo", "roe": "Ronji", "rof": "Rombo", "rog": "Northern Roglai", "rol": "Romblomanon", "rom": "Romany", "roo": "Rotokas", "rop": "Kriol", "ror": "Rongga", "rou": "Runga", "row": "Dela-Oenale", "rpn": "Repanbitip", "rpt": "Rapting", "rri": "Ririo", "rro": "Waima", "rrt": "Arritinngithigh", "rsb": "Romano-Serbian", "rsk": "Ruthenian; Rusyn", "rsl": "Russian Sign Language", "rsm": "Miriwoong Sign Language", "rsn": "Rwandan Sign Language", "rtc": "Rungtu Chin", "rth": "Ratahan", "rtm": "Rotuman", "rts": "Yurats", "rtw": "Rathawi", "ru": "Russian", "rub": "Gungu", "ruc": "Ruuli", "rue": "Rusyn", "ruf": 
"Luguru", "rug": "Roviana", "ruh": "Ruga", "rui": "Rufiji", "ruk": "Che", "ruo": "Istro Romanian", "rup": "Macedo-Romanian; Aromanian; Arumanian", "ruq": "Megleno Romanian", "rut": "Rutul", "ruu": "Lanas Lobu", "ruy": "Mala (Nigeria)", "ruz": "Ruma", "rw": "Kinyarwanda", "rwa": "Rawo", "rwk": "Rwa", "rwl": "Ruwila", "rwm": "Amba (Uganda)", "rwo": "Rawa", "rwr": "Marwari (India)", "rxd": "Ngardi", "rxw": "Karuwali; Garuwali", "ryn": "Northern Amami-Oshima", "rys": "Yaeyama", "ryu": "Central Okinawan", "rzh": "Rāziḥī", "sa": "Sanskrit", "saa": "Saba", "sab": "Buglere", "sac": "Meskwaki", "sad": "Sandawe", "sae": "Sabanê", "saf": "Safaliba", "sah": "Yakut", "sai": "South American Indian languages", "saj": "Sahu", "sak": "Sake", "sal": "Salishan languages", "sam": "Samaritan Aramaic", "sao": "Sause", "saq": "Samburu", "sar": "Saraveca", "sas": "Sasak", "sat": "Santali", "sau": "Saleman", "sav": "Saafi-Saafi", "saw": "Sawi", "sax": "Sa", "say": "Saya", "saz": "Saurashtra", "sba": "Ngambay", "sbb": "Simbo", "sbc": "Kele (Papua New Guinea)", "sbd": "Southern Samo", "sbe": "Saliba", "sbf": "Chabu; Shabo", "sbg": "Seget", "sbh": "Sori-Harengan", "sbi": "Seti", "sbj": "Surbakhal", "sbk": "Safwa", "sbl": "Botolan Sambal", "sbm": "Sagala", "sbn": "Sindhi Bhil", "sbo": "Sabüm", "sbp": "Sangu (Tanzania)", "sbq": "Sileibi", "sbr": "Sembakung Murut", "sbs": "Subiya", "sbt": "Kimki", "sbu": "Stod Bhoti", "sbv": "Sabine", "sbw": "Simba", "sbx": "Seberuang", "sby": "Soli", "sbz": "Sara Kaba", "sc": "Sardinian", "scb": "Chut", "sce": "Dongxiang", "scf": "San Miguel Creole French", "scg": "Sanggau", "sch": "Sakachep", "sci": "Sri Lankan Creole Malay", "sck": "Sadri", "scl": "Shina", "scn": "Sicilian", "sco": "Scots", "scp": "Hyolmo; Helambu Sherpa", "scq": "Sa'och", "scs": "North Slavey", "sct": "Southern Katang", "scu": "Shumcho", "scv": "Sheni", "scw": "Sha", "scx": "Sicel", "sd": "Sindhi", "sda": "Toraja-Sa'dan", "sdb": "Shabak", "sdc": "Sassarese Sardinian", "sde": "Surubu", "sdf": "Sarli", "sdg": "Savi", "sdh": "Southern Kurdish", "sdj": "Suundi", "sdk": "Sos Kundi", "sdl": "Saudi Arabian Sign Language", "sdn": "Gallurese Sardinian", "sdo": "Bukar-Sadung Bidayuh", "sdp": "Sherdukpen", "sdq": "Semandang", "sdr": "Oraon Sadri", "sds": "Sened", "sdt": "Shuadit", "sdu": "Sarudu", "sdv": "Eastern Sudanic languages", "sdx": "Sibu Melanau", "sdz": "Sallands", "se": "Northern Sami", "sea": "Semai", "seb": "Shempire Senoufo", "sec": "Sechelt", "sed": "Sedang", "see": "Seneca", "sef": "Cebaara Senoufo", "seg": "Segeju", "seh": "Sena", "sei": "Seri", "sej": "Sene", "sek": "Sekani", "sel": "Selkup", "sem": "Semitic languages", "sen": "Nanerigé Sénoufo", "seo": "Suarmin", "sep": "Sìcìté Sénoufo", "seq": "Senara Sénoufo", "ser": "Serrano", "ses": "Koyraboro Senni Songhai", "set": "Sentani", "seu": "Serui-Laut", "sev": "Nyarafolo Senoufo", "sew": "Sewa Bay", "sey": "Secoya", "sez": "Senthang Chin", "sfb": "Langue des signes de Belgique Francophone; French Belgian Sign Language", "sfe": "Eastern Subanen", "sfm": "Small Flowery Miao", "sfs": "South African Sign Language", "sfw": "Sehwi", "sg": "Sango", "sga": "Old Irish (to 900)", "sgb": "Mag-antsi Ayta", "sgc": "Kipsigis", "sgd": "Surigaonon", "sge": "Segai", "sgg": "Swiss-German Sign Language", "sgh": "Shughni", "sgi": "Suga", "sgj": "Surgujia", "sgk": "Sangkong", "sgm": "Singa", "sgn": "Sign languages", "sgp": "Singpho", "sgr": "Sangisari", "sgs": "Samogitian", "sgt": "Brokpake", "sgu": "Salas", "sgw": "Sebat Bet Gurage", "sgx": "Sierra Leone Sign Language", "sgy": 
"Sanglechi", "sgz": "Sursurunga", "sh": "Serbo-Croatian", "sha": "Shall-Zwall", "shb": "Ninam", "shc": "Sonde", "shd": "Kundal Shahi", "she": "Sheko", "shg": "Shua", "shh": "Shoshoni", "shi": "Tachelhit", "shj": "Shatt", "shk": "Shilluk", "shl": "Shendu", "shm": "Shahrudi", "shn": "Shan", "sho": "Shanga", "shp": "Shipibo-Conibo", "shq": "Sala", "shr": "Shi", "shs": "Shuswap", "sht": "Shasta", "shu": "Chadian Arabic", "shv": "Shehri", "shw": "Shwai", "shx": "She", "shy": "Tachawit", "shz": "Syenara Senoufo", "si": "Sinhala; Sinhalese", "sia": "Akkala Sami", "sib": "Sebop", "sid": "Sidamo", "sie": "Simaa", "sif": "Siamou", "sig": "Paasaal", "sih": "Zire; Sîshëë", "sii": "Shom Peng", "sij": "Numbami", "sik": "Sikiana", "sil": "Tumulung Sisaala", "sim": "Mende (Papua New Guinea)", "sio": "Siouan languages", "sip": "Sikkimese", "siq": "Sonia", "sir": "Siri", "sis": "Siuslaw", "sit": "Sino-Tibetan languages", "siu": "Sinagen", "siv": "Sumariup", "siw": "Siwai", "six": "Sumau", "siy": "Sivandi", "siz": "Siwi", "sja": "Epena", "sjb": "Sajau Basap", "sjd": "Kildin Sami", "sje": "Pite Sami", "sjg": "Assangori", "sjk": "Kemi Sami", "sjl": "Sajalong; Miji", "sjm": "Mapun", "sjn": "Sindarin", "sjo": "Xibe", "sjp": "Surjapuri", "sjr": "Siar-Lak", "sjs": "Senhaja De Srair", "sjt": "Ter Sami", "sju": "Ume Sami", "sjw": "Shawnee", "sk": "Slovak", "ska": "Skagit", "skb": "Saek", "skc": "Ma Manda", "skd": "Southern Sierra Miwok", "ske": "Seke (Vanuatu)", "skf": "Sakirabiá", "skg": "Sakalava Malagasy", "skh": "Sikule", "ski": "Sika", "skj": "Seke (Nepal)", "skm": "Kutong", "skn": "Kolibugan Subanon", "sko": "Seko Tengah", "skp": "Sekapan", "skq": "Sininkere", "skr": "Saraiki; Seraiki", "sks": "Maia", "skt": "Sakata", "sku": "Sakao", "skv": "Skou", "skw": "Skepi Creole Dutch", "skx": "Seko Padang", "sky": "Sikaiana", "skz": "Sekar", "sl": "Slovenian", "sla": "Slavic languages", "slc": "Sáliba", "sld": "Sissala", "sle": "Sholaga", "slf": "Swiss-Italian Sign Language", "slg": "Selungai Murut", "slh": "Southern Puget Sound Salish", "sli": "Lower Silesian", "slj": "Salumá", "sll": "Salt-Yui", "slm": "Pangutaran Sama", "sln": "Salinan", "slp": "Lamaholot", "slq": "Salchuq", "slr": "Salar", "sls": "Singapore Sign Language", "slt": "Sila", "slu": "Selaru", "slw": "Sialum", "slx": "Salampasu", "sly": "Selayar", "slz": "Ma'ya", "sm": "Samoan", "sma": "Southern Sami", "smb": "Simbari", "smc": "Som", "smf": "Auwe", "smg": "Simbali", "smh": "Samei", "smi": "Sami languages", "smj": "Lule Sami", "smk": "Bolinao", "sml": "Central Sama", "smm": "Musasa", "smn": "Inari Sami", "smp": "Samaritan", "smq": "Samo", "smr": "Simeulue", "sms": "Skolt Sami", "smt": "Simte", "smu": "Somray", "smv": "Samvedi", "smw": "Sumbawa", "smx": "Samba", "smy": "Semnani", "smz": "Simeku", "sn": "Shona", "snc": "Sinaugoro", "sne": "Bau Bidayuh", "snf": "Noon", "sng": "Sanga (Democratic Republic of Congo)", "sni": "Sensi", "snj": "Riverain Sango", "snk": "Soninke", "snl": "Sangil", "snm": "Southern Ma'di", "snn": "Siona", "sno": "Snohomish", "snp": "Siane", "snq": "Sangu (Gabon)", "snr": "Sihan", "sns": "South West Bay; Nahavaq", "snu": "Senggi; Viid", "snv": "Sa'ban", "snw": "Selee", "snx": "Sam", "sny": "Saniyo-Hiyewe", "snz": "Kou", "so": "Somali", "soa": "Thai Song", "sob": "Sobei", "soc": "So (Democratic Republic of Congo)", "sod": "Songoora", "soe": "Songomeno", "sog": "Sogdian", "soh": "Aka", "soi": "Sonha", "soj": "Soi", "sok": "Sokoro", "sol": "Solos", "son": "Songhai languages", "soo": "Songo", "sop": "Songe", "soq": "Kanasi", "sor": 
"Somrai", "sos": "Seeku", "sou": "Southern Thai", "sov": "Sonsorol", "sow": "Sowanda", "sox": "Swo", "soy": "Miyobe", "soz": "Temi", "spb": "Sepa (Indonesia)", "spc": "Sapé", "spd": "Saep", "spe": "Sepa (Papua New Guinea)", "spg": "Sian", "spi": "Saponi", "spk": "Sengo", "spl": "Selepet", "spm": "Akukem", "spn": "Sanapaná", "spo": "Spokane", "spp": "Supyire Senoufo", "spq": "Loreto-Ucayali Spanish", "spr": "Saparua", "sps": "Saposa", "spt": "Spiti Bhoti", "spu": "Sapuan", "spv": "Sambalpuri; Kosli", "spx": "South Picene", "spy": "Sabaot", "sq": "Albanian", "sqa": "Shama-Sambuga", "sqh": "Shau", "sqj": "Albanian languages", "sqk": "Albanian Sign Language", "sqm": "Suma", "sqn": "Susquehannock", "sqo": "Sorkhei", "sqq": "Sou", "sqr": "Siculo Arabic", "sqs": "Sri Lankan Sign Language", "sqt": "Soqotri", "squ": "Squamish", "sqx": "Kufr Qassem Sign Language (KQSL)", "sr": "Serbian", "sra": "Saruga", "srb": "Sora", "src": "Logudorese Sardinian", "sre": "Sara", "srf": "Nafi", "srg": "Sulod", "srh": "Sarikoli", "sri": "Siriano", "srk": "Serudung Murut", "srl": "Isirawa", "srm": "Saramaccan", "srn": "Sranan Tongo", "sro": "Campidanese Sardinian", "srq": "Sirionó", "srr": "Serer", "srs": "Sarsi", "srt": "Sauri", "sru": "Suruí", "srv": "Southern Sorsoganon", "srw": "Serua", "srx": "Sirmauri", "sry": "Sera", "srz": "Shahmirzadi", "ss": "Swati", "ssa": "Nilo-Saharan languages", "ssb": "Southern Sama", "ssc": "Suba-Simbiti", "ssd": "Siroi", "sse": "Balangingi; Bangingih Sama", "ssf": "Thao", "ssg": "Seimat", "ssh": "Shihhi Arabic", "ssi": "Sansi", "ssj": "Sausi", "ssk": "Sunam", "ssl": "Western Sisaala", "ssm": "Semnam", "ssn": "Waata", "sso": "Sissano", "ssp": "Spanish Sign Language", "ssq": "So'a", "ssr": "Swiss-French Sign Language", "sss": "Sô", "sst": "Sinasina", "ssu": "Susuami", "ssv": "Shark Bay", "ssx": "Samberigi", "ssy": "Saho", "ssz": "Sengseng", "st": "Southern Sotho", "sta": "Settla", "stb": "Northern Subanen", "std": "Sentinel", "ste": "Liana-Seti", "stf": "Seta", "stg": "Trieng", "sth": "Shelta", "sti": "Bulo Stieng", "stj": "Matya Samo", "stk": "Arammba", "stl": "Stellingwerfs", "stm": "Setaman", "stn": "Owa", "sto": "Stoney", "stp": "Southeastern Tepehuan", "stq": "Saterfriesisch", "str": "Straits Salish", "sts": "Shumashti", "stt": "Budeh Stieng", "stu": "Samtao", "stv": "Silt'e", "stw": "Satawalese", "sty": "Siberian Tatar", "su": "Sundanese", "sua": "Sulka", "sub": "Suku", "suc": "Western Subanon", "sue": "Suena", "sug": "Suganga", "sui": "Suki", "suj": "Shubi", "suk": "Sukuma", "suo": "Bouni", "suq": "Tirmaga-Chai Suri; Suri", "sur": "Mwaghavul", "sus": "Susu", "sut": "Subtiaba", "suv": "Puroik", "suw": "Sumbwa", "sux": "Sumerian", "suy": "Suyá", "suz": "Sunwar", "sv": "Swedish", "sva": "Svan", "svb": "Ulau-Suain", "svc": "Vincentian Creole English", "sve": "Serili", "svk": "Slovakian Sign Language", "svm": "Slavomolisano", "svs": "Savosavo", "svx": "Skalvian", "sw": "Swahili (macrolanguage)", "swb": "Maore Comorian", "swc": "Congo Swahili", "swf": "Sere", "swg": "Swabian", "swh": "Swahili (individual language); Kiswahili", "swi": "Sui", "swj": "Sira", "swk": "Malawi Sena", "swl": "Swedish Sign Language", "swm": "Samosa", "swn": "Sawknah", "swo": "Shanenawa", "swp": "Suau", "swq": "Sharwa", "swr": "Saweru", "sws": "Seluwasan", "swt": "Sawila", "swu": "Suwawa", "swv": "Shekhawati", "sww": "Sowa", "swx": "Suruahá", "swy": "Sarua", "sxb": "Suba", "sxc": "Sicanian", "sxe": "Sighu", "sxg": "Shuhi; Shixing", "sxk": "Southern Kalapuya", "sxl": "Selian", "sxm": "Samre", "sxn": "Sangir", 
"sxo": "Sorothaptic", "sxr": "Saaroa", "sxs": "Sasaru", "sxu": "Upper Saxon", "sxw": "Saxwe Gbe", "sya": "Siang", "syb": "Central Subanen", "syc": "Classical Syriac", "syd": "Samoyedic languages", "syi": "Seki", "syk": "Sukur", "syl": "Sylheti", "sym": "Maya Samo", "syn": "Senaya", "syo": "Suoy", "syr": "Syriac", "sys": "Sinyar", "syw": "Kagate", "syx": "Samay", "syy": "Al-Sayyid Bedouin Sign Language", "sza": "Semelai", "szb": "Ngalum", "szc": "Semaq Beri", "szd": "Seru", "sze": "Seze", "szg": "Sengele", "szl": "Silesian", "szn": "Sula", "szp": "Suabo", "szs": "Solomon Islands Sign Language", "szv": "Isu (Fako Division)", "szw": "Sawai", "szy": "Sakizaya", "ta": "Tamil", "taa": "Lower Tanana", "tab": "Tabassaran", "tac": "Lowland Tarahumara", "tad": "Tause", "tae": "Tariana", "taf": "Tapirapé", "tag": "Tagoi", "tai": "Tai languages", "taj": "Eastern Tamang", "tak": "Tala", "tal": "Tal", "tan": "Tangale", "tao": "Yami", "tap": "Taabwa", "taq": "Tamasheq", "tar": "Central Tarahumara", "tas": "Tay Boi", "tau": "Upper Tanana", "tav": "Tatuyo", "taw": "Tai", "tax": "Tamki", "tay": "Atayal", "taz": "Tocho", "tba": "Aikanã", "tbc": "Takia", "tbd": "Kaki Ae", "tbe": "Tanimbili", "tbf": "Mandara", "tbg": "North Tairora", "tbh": "Dharawal; Thurawal", "tbi": "Gaam", "tbj": "Tiang", "tbk": "Calamian Tagbanwa", "tbl": "Tboli", "tbm": "Tagbu", "tbn": "Barro Negro Tunebo", "tbo": "Tawala", "tbp": "Taworta; Diebroud", "tbq": "Tibeto-Burman languages", "tbr": "Tumtum", "tbs": "Tanguat", "tbt": "Tembo (Kitembo)", "tbu": "Tubar", "tbv": "Tobo", "tbw": "Tagbanwa", "tbx": "Kapin", "tby": "Tabaru", "tbz": "Ditammari", "tca": "Ticuna", "tcb": "Tanacross", "tcc": "Datooga", "tcd": "Tafi", "tce": "Southern Tutchone", "tcf": "Malinaltepec Me'phaa; Malinaltepec Tlapanec", "tcg": "Tamagario", "tch": "Turks And Caicos Creole English", "tci": "Wára", "tck": "Tchitchege", "tcl": "Taman (Myanmar)", "tcm": "Tanahmerah", "tcn": "Tichurong", "tco": "Taungyo", "tcp": "Tawr Chin", "tcq": "Kaiy", "tcs": "Torres Strait Creole; Yumplatok", "tct": "T'en", "tcu": "Southeastern Tarahumara", "tcw": "Tecpatlán Totonac", "tcx": "Toda", "tcy": "Tulu", "tcz": "Thado Chin", "tda": "Tagdal", "tdb": "Panchpargania", "tdc": "Emberá-Tadó", "tdd": "Tai Nüa", "tde": "Tiranige Diga Dogon", "tdf": "Talieng", "tdg": "Western Tamang", "tdh": "Thulung", "tdi": "Tomadino", "tdj": "Tajio", "tdk": "Tambas", "tdl": "Sur", "tdm": "Taruma", "tdn": "Tondano", "tdo": "Teme", "tdq": "Tita", "tdr": "Todrah", "tds": "Doutai", "tdt": "Tetun Dili", "tdv": "Toro", "tdx": "Tandroy-Mahafaly Malagasy", "tdy": "Tadyawan", "te": "Telugu", "tea": "Temiar", "teb": "Tetete", "tec": "Terik", "ted": "Tepo Krumen", "tee": "Huehuetla Tepehua", "tef": "Teressa", "teg": "Teke-Tege", "teh": "Tehuelche", "tei": "Torricelli", "tek": "Ibali Teke", "tem": "Timne", "ten": "Tama (Colombia)", "teo": "Teso", "tep": "Tepecano", "teq": "Temein", "ter": "Tereno", "tes": "Tengger", "tet": "Tetum", "teu": "Soo", "tev": "Teor", "tew": "Tewa (USA)", "tex": "Tennet", "tey": "Tulishi", "tez": "Tetserret", "tfi": "Tofin Gbe", "tfn": "Tanaina", "tfo": "Tefaro", "tfr": "Teribe", "tft": "Ternate", "tg": "Tajik", "tga": "Sagalla", "tgb": "Tobilung", "tgc": "Tigak", "tgd": "Ciwogai", "tge": "Eastern Gorkha Tamang", "tgf": "Chalikha", "tgh": "Tobagonian Creole English", "tgi": "Lawunuia", "tgj": "Tagin", "tgn": "Tandaganon", "tgo": "Sudest", "tgp": "Tangoa", "tgq": "Tring", "tgr": "Tareng", "tgs": "Nume", "tgt": "Central Tagbanwa", "tgu": "Tanggu", "tgv": "Tingui-Boto", "tgw": "Tagwana Senoufo", 
"tgx": "Tagish", "tgy": "Togoyo", "tgz": "Tagalaka", "th": "Thai", "thd": "Kuuk Thaayorre; Thayore", "the": "Chitwania Tharu", "thf": "Thangmi", "thh": "Northern Tarahumara", "thi": "Tai Long", "thk": "Tharaka; Kitharaka", "thl": "Dangaura Tharu", "thm": "Aheu", "thn": "Thachanadan", "thp": "Thompson", "thq": "Kochila Tharu", "thr": "Rana Tharu", "ths": "Thakali", "tht": "Tahltan", "thu": "Thuri", "thv": "Tahaggart Tamahaq", "thy": "Tha", "thz": "Tayart Tamajeq", "ti": "Tigrinya", "tia": "Tidikelt Tamazight", "tic": "Tira", "tif": "Tifal", "tig": "Tigre", "tih": "Timugon Murut", "tii": "Tiene", "tij": "Tilung", "tik": "Tikar", "til": "Tillamook", "tim": "Timbe", "tin": "Tindi", "tio": "Teop", "tip": "Trimuris", "tiq": "Tiéfo", "tis": "Masadiit Itneg", "tit": "Tinigua", "tiu": "Adasen", "tiv": "Tiv", "tiw": "Tiwi", "tix": "Southern Tiwa", "tiy": "Tiruray", "tiz": "Tai Hongjin", "tja": "Tajuasohn", "tjg": "Tunjung", "tji": "Northern Tujia", "tjj": "Tjungundji", "tjl": "Tai Laing", "tjm": "Timucua", "tjn": "Tonjon", "tjo": "Temacine Tamazight", "tjp": "Tjupany", "tjs": "Southern Tujia", "tju": "Tjurruru", "tjw": "Djabwurrung", "tk": "Turkmen", "tka": "Truká", "tkb": "Buksa", "tkd": "Tukudede", "tke": "Takwane", "tkf": "Tukumanféd", "tkg": "Tesaka Malagasy", "tkl": "Tokelau", "tkm": "Takelma", "tkn": "Toku-No-Shima", "tkp": "Tikopia", "tkq": "Tee", "tkr": "Tsakhur", "tks": "Takestani", "tkt": "Kathoriya Tharu", "tku": "Upper Necaxa Totonac", "tkv": "Mur Pano", "tkw": "Teanu", "tkx": "Tangko", "tkz": "Takua", "tl": "Tagalog", "tla": "Southwestern Tepehuan", "tlb": "Tobelo", "tlc": "Yecuatla Totonac", "tld": "Talaud", "tlf": "Telefol", "tlg": "Tofanma", "tlh": "Klingon; tlhIngan Hol", "tli": "Tlingit", "tlj": "Talinga-Bwisi", "tlk": "Taloki", "tll": "Tetela", "tlm": "Tolomako", "tln": "Talondo'", "tlo": "Talodi", "tlp": "Filomena Mata-Coahuitlán Totonac", "tlq": "Tai Loi", "tlr": "Talise", "tls": "Tambotalo", "tlt": "Sou Nama; Teluti", "tlu": "Tulehu", "tlv": "Taliabu", "tlx": "Khehek", "tly": "Talysh", "tma": "Tama (Chad)", "tmb": "Katbol; Avava", "tmc": "Tumak", "tmd": "Haruai", "tme": "Tremembé", "tmf": "Toba-Maskoy", "tmg": "Ternateño", "tmh": "Tamashek", "tmi": "Tutuba", "tmj": "Samarokena", "tmk": "Northwestern Tamang", "tml": "Tamnim Citak", "tmm": "Tai Thanh", "tmn": "Taman (Indonesia)", "tmo": "Temoq", "tmq": "Tumleo", "tmr": "Jewish Babylonian Aramaic (ca. 
200-1200 CE)", "tms": "Tima", "tmt": "Tasmate", "tmu": "Iau", "tmv": "Tembo (Motembo)", "tmw": "Temuan", "tmy": "Tami", "tmz": "Tamanaku", "tn": "Tswana", "tna": "Tacana", "tnb": "Western Tunebo", "tnc": "Tanimuca-Retuarã", "tnd": "Angosturas Tunebo", "tng": "Tobanga", "tnh": "Maiani", "tni": "Tandia", "tnk": "Kwamera", "tnl": "Lenakel", "tnm": "Tabla", "tnn": "North Tanna", "tno": "Toromono", "tnp": "Whitesands", "tnq": "Taino", "tnr": "Ménik", "tns": "Tenis", "tnt": "Tontemboan", "tnu": "Tay Khang", "tnv": "Tangchangya", "tnw": "Tonsawang", "tnx": "Tanema", "tny": "Tongwe", "tnz": "Ten'edn", "to": "Tonga (Tonga Islands)", "tob": "Toba", "toc": "Coyutla Totonac", "tod": "Toma", "tof": "Gizrra", "tog": "Tonga (Nyasa)", "toh": "Gitonga", "toi": "Tonga (Zambia)", "toj": "Tojolabal", "tok": "Toki Pona", "tol": "Tolowa", "tom": "Tombulu", "too": "Xicotepec De Juárez Totonac", "top": "Papantla Totonac", "toq": "Toposa", "tor": "Togbo-Vara Banda", "tos": "Highland Totonac", "tou": "Tho", "tov": "Upper Taromi", "tow": "Jemez", "tox": "Tobian", "toy": "Topoiyo", "toz": "To", "tpa": "Taupota", "tpc": "Azoyú Me'phaa; Azoyú Tlapanec", "tpe": "Tippera", "tpf": "Tarpia", "tpg": "Kula", "tpi": "Tok Pisin", "tpj": "Tapieté", "tpk": "Tupinikin", "tpl": "Tlacoapa Me'phaa; Tlacoapa Tlapanec", "tpm": "Tampulma", "tpn": "Tupinambá", "tpo": "Tai Pao", "tpp": "Pisaflores Tepehua", "tpq": "Tukpa", "tpr": "Tuparí", "tpt": "Tlachichilco Tepehua", "tpu": "Tampuan", "tpv": "Tanapag", "tpw": "Tupí", "tpx": "Acatepec Me'phaa; Acatepec Tlapanec", "tpy": "Trumai", "tpz": "Tinputz", "tqb": "Tembé", "tql": "Lehali", "tqm": "Turumsa", "tqn": "Tenino", "tqo": "Toaripi", "tqp": "Tomoip", "tqq": "Tunni", "tqr": "Torona", "tqt": "Western Totonac", "tqu": "Touo", "tqw": "Tonkawa", "tr": "Turkish", "tra": "Tirahi", "trb": "Terebu", "trc": "Copala Triqui", "trd": "Turi", "tre": "East Tarangan", "trf": "Trinidadian Creole English", "trg": "Lishán Didán", "trh": "Turaka", "tri": "Trió", "trj": "Toram", "trk": "Turkic languages", "trl": "Traveller Scottish", "trm": "Tregami", "trn": "Trinitario", "tro": "Tarao Naga", "trp": "Kok Borok", "trq": "San Martín Itunyoso Triqui", "trr": "Taushiro", "trs": "Chicahuaxtla Triqui", "trt": "Tunggare", "tru": "Turoyo; Surayt", "trv": "Sediq; Seediq; Taroko", "trw": "Torwali", "trx": "Tringgus-Sembaan Bidayuh", "try": "Turung", "trz": "Torá", "ts": "Tsonga", "tsa": "Tsaangi", "tsb": "Tsamai", "tsc": "Tswa", "tsd": "Tsakonian", "tse": "Tunisian Sign Language", "tsg": "Tausug", "tsh": "Tsuvan", "tsi": "Tsimshian", "tsj": "Tshangla", "tsk": "Tseku", "tsl": "Ts'ün-Lao", "tsm": "Turkish Sign Language; Türk İşaret Dili", "tsp": "Northern Toussian", "tsq": "Thai Sign Language", "tsr": "Akei", "tss": "Taiwan Sign Language", "tst": "Tondi Songway Kiini", "tsu": "Tsou", "tsv": "Tsogo", "tsw": "Tsishingini", "tsx": "Mubami", "tsy": "Tebul Sign Language", "tsz": "Purepecha", "tt": "Tatar", "tta": "Tutelo", "ttb": "Gaa", "ttc": "Tektiteko", "ttd": "Tauade", "tte": "Bwanabwana", "ttf": "Tuotomb", "ttg": "Tutong", "tth": "Upper Ta'oih", "tti": "Tobati", "ttj": "Tooro", "ttk": "Totoro", "ttl": "Totela", "ttm": "Northern Tutchone", "ttn": "Towei", "tto": "Lower Ta'oih", "ttp": "Tombelala", "ttq": "Tawallammat Tamajaq", "ttr": "Tera", "tts": "Northeastern Thai", "ttt": "Muslim Tat", "ttu": "Torau", "ttv": "Titan", "ttw": "Long Wat", "tty": "Sikaritai", "ttz": "Tsum", "tua": "Wiarumus", "tub": "Tübatulabal", "tuc": "Mutu", "tud": "Tuxá", "tue": "Tuyuca", "tuf": "Central Tunebo", "tug": "Tunia", "tuh": "Taulil", 
"tui": "Tupuri", "tuj": "Tugutil", "tul": "Tula", "tum": "Tumbuka", "tun": "Tunica", "tuo": "Tucano", "tup": "Tupi languages", "tuq": "Tedaga", "tus": "Tuscarora", "tut": "Altaic languages", "tuu": "Tututni", "tuv": "Turkana", "tuw": "Tungus languages", "tux": "Tuxináwa", "tuy": "Tugen", "tuz": "Turka", "tva": "Vaghua", "tvd": "Tsuvadi", "tve": "Te'un", "tvk": "Southeast Ambrym", "tvl": "Tuvalu", "tvm": "Tela-Masbuar", "tvn": "Tavoyan", "tvo": "Tidore", "tvs": "Taveta", "tvt": "Tutsa Naga", "tvu": "Tunen", "tvw": "Sedoa", "tvx": "Taivoan", "tvy": "Timor Pidgin", "tw": "Twi", "twa": "Twana", "twb": "Western Tawbuid", "twc": "Teshenawa", "twd": "Twents", "twe": "Tewa (Indonesia)", "twf": "Northern Tiwa", "twg": "Tereweng", "twh": "Tai Dón", "twl": "Tawara", "twm": "Tawang Monpa", "twn": "Twendi", "two": "Tswapong", "twp": "Ere", "twq": "Tasawaq", "twr": "Southwestern Tarahumara", "twt": "Turiwára", "twu": "Termanu", "tww": "Tuwari", "twx": "Tewe", "twy": "Tawoyan", "txa": "Tombonuo", "txb": "Tokharian B", "txc": "Tsetsaut", "txe": "Totoli", "txg": "Tangut", "txh": "Thracian", "txi": "Ikpeng", "txj": "Tarjumo", "txm": "Tomini", "txn": "West Tarangan", "txo": "Toto", "txq": "Tii", "txr": "Tartessian", "txs": "Tonsea", "txt": "Citak", "txu": "Kayapó", "txx": "Tatana", "txy": "Tanosy Malagasy", "ty": "Tahitian", "tya": "Tauya", "tye": "Kyanga", "tyh": "O'du", "tyi": "Teke-Tsaayi", "tyj": "Tai Do; Tai Yo", "tyl": "Thu Lao", "tyn": "Kombai", "typ": "Thaypan", "tyr": "Tai Daeng", "tys": "Tày Sa Pa", "tyt": "Tày Tac", "tyu": "Kua", "tyv": "Tuvinian", "tyx": "Teke-Tyee", "tyy": "Tiyaa", "tyz": "Tày", "tza": "Tanzanian Sign Language", "tzh": "Tzeltal", "tzj": "Tz'utujil", "tzl": "Talossan", "tzm": "Central Atlas Tamazight", "tzn": "Tugun", "tzo": "Tzotzil", "tzx": "Tabriak", "uam": "Uamué", "uan": "Kuan", "uar": "Tairuma", "uba": "Ubang", "ubi": "Ubi", "ubl": "Buhi'non Bikol", "ubr": "Ubir", "ubu": "Umbu-Ungu", "uby": "Ubykh", "uda": "Uda", "ude": "Udihe", "udg": "Muduga", "udi": "Udi", "udj": "Ujir", "udl": "Wuzlam", "udm": "Udmurt", "udu": "Uduk", "ues": "Kioko", "ufi": "Ufim", "ug": "Uighur; Uyghur", "uga": "Ugaritic", "ugb": "Kuku-Ugbanh", "uge": "Ughele", "ugh": "Kubachi", "ugn": "Ugandan Sign Language", "ugo": "Ugong", "ugy": "Uruguayan Sign Language", "uha": "Uhami", "uhn": "Damal", "uis": "Uisai", "uiv": "Iyive", "uji": "Tanjijili", "uk": "Ukrainian", "uka": "Kaburi", "ukg": "Ukuriguma", "ukh": "Ukhwejo", "uki": "Kui (India)", "ukk": "Muak Sa-aak", "ukl": "Ukrainian Sign Language", "ukp": "Ukpe-Bayobiri", "ukq": "Ukwa", "uks": "Urubú-Kaapor Sign Language; Kaapor Sign Language", "uku": "Ukue", "ukv": "Kuku", "ukw": "Ukwuani-Aboh-Ndoni", "uky": "Kuuk-Yak", "ula": "Fungwa", "ulb": "Ulukwumi", "ulc": "Ulch", "ule": "Lule", "ulf": "Usku; Afra", "uli": "Ulithian", "ulk": "Meriam Mir", "ull": "Ullatan", "ulm": "Ulumanda'", "uln": "Unserdeutsch", "ulu": "Uma' Lung", "ulw": "Ulwa", "uma": "Umatilla", "umb": "Umbundu", "umc": "Marrucinian", "umd": "Umbindhamu", "umg": "Morrobalama; Umbuygamu", "umi": "Ukit", "umm": "Umon", "umn": "Makyan Naga", "umo": "Umotína", "ump": "Umpila", "umr": "Umbugarla", "ums": "Pendau", "umu": "Munsee", "una": "North Watut", "und": "Undetermined", "une": "Uneme", "ung": "Ngarinyin", "uni": "Uni", "unk": "Enawené-Nawé", "unm": "Unami", "unn": "Kurnai", "unr": "Mundari", "unu": "Unubahe", "unx": "Munda", "unz": "Unde Kaili", "uon": "Kulon", "upi": "Umeda", "upv": "Uripiv-Wala-Rano-Atchin", "ur": "Urdu", "ura": "Urarina", "urb": "Urubú-Kaapor; Kaapor", "urc": "Urningangg", 
"ure": "Uru", "urf": "Uradhi", "urg": "Urigina", "urh": "Urhobo", "uri": "Urim", "urj": "Uralic languages", "urk": "Urak Lawoi'", "url": "Urali", "urm": "Urapmin", "urn": "Uruangnirin", "uro": "Ura (Papua New Guinea)", "urp": "Uru-Pa-In", "urr": "Lehalurup; Löyöp", "urt": "Urat", "uru": "Urumi", "urv": "Uruava", "urw": "Sop", "urx": "Urimo", "ury": "Orya", "urz": "Uru-Eu-Wau-Wau", "usa": "Usarufa", "ush": "Ushojo", "usi": "Usui", "usk": "Usaghade", "usp": "Uspanteco", "uss": "us-Saare", "usu": "Uya", "uta": "Otank", "ute": "Ute-Southern Paiute", "uth": "ut-Hun", "utp": "Amba (Solomon Islands)", "utr": "Etulo", "utu": "Utu", "uum": "Urum", "uur": "Ura (Vanuatu)", "uuu": "U", "uve": "West Uvean; Fagauvea", "uvh": "Uri", "uvl": "Lote", "uwa": "Kuku-Uwanh", "uya": "Doko-Uyanga", "uz": "Uzbek", "uzn": "Northern Uzbek", "uzs": "Southern Uzbek", "vaa": "Vaagri Booli", "vae": "Vale", "vaf": "Vafsi", "vag": "Vagla", "vah": "Varhadi-Nagpuri", "vai": "Vai", "vaj": "Sekele; Northwestern ǃKung; Vasekele", "val": "Vehes", "vam": "Vanimo", "van": "Valman", "vao": "Vao", "vap": "Vaiphei", "var": "Huarijio", "vas": "Vasavi", "vau": "Vanuma", "vav": "Varli", "vay": "Wayu", "vbb": "Southeast Babar", "vbk": "Southwestern Bontok", "ve": "Venda", "vec": "Venetian", "ved": "Veddah", "vel": "Veluws", "vem": "Vemgo-Mabas", "veo": "Ventureño", "vep": "Veps", "ver": "Mom Jango", "vgr": "Vaghri", "vgt": "Vlaamse Gebarentaal; Flemish Sign Language", "vi": "Vietnamese", "vic": "Virgin Islands Creole English", "vid": "Vidunda", "vif": "Vili", "vig": "Viemo", "vil": "Vilela", "vin": "Vinza", "vis": "Vishavan", "vit": "Viti", "viv": "Iduna", "vka": "Kariyarra", "vkj": "Kujarge", "vkk": "Kaur", "vkl": "Kulisusu", "vkm": "Kamakan", "vkn": "Koro Nulu", "vko": "Kodeoha", "vkp": "Korlai Creole Portuguese", "vkt": "Tenggarong Kutai Malay", "vku": "Kurrama", "vkz": "Koro Zuba", "vlp": "Valpei", "vls": "Vlaams", "vma": "Martuyhunira", "vmb": "Barbaram", "vmc": "Juxtlahuaca Mixtec", "vmd": "Mudu Koraga", "vme": "East Masela", "vmf": "Mainfränkisch", "vmg": "Lungalunga", "vmh": "Maraghei", "vmi": "Miwa", "vmj": "Ixtayutla Mixtec", "vmk": "Makhuwa-Shirima", "vml": "Malgana", "vmm": "Mitlatongo Mixtec", "vmp": "Soyaltepec Mazatec", "vmq": "Soyaltepec Mixtec", "vmr": "Marenje", "vms": "Moksela", "vmu": "Muluridyi", "vmv": "Valley Maidu", "vmw": "Makhuwa", "vmx": "Tamazola Mixtec", "vmy": "Ayautla Mazatec", "vmz": "Mazatlán Mazatec", "vnk": "Vano; Lovono", "vnm": "Vinmavis; Neve'ei", "vnp": "Vunapu", "vo": "Volapük", "vor": "Voro", "vot": "Votic", "vra": "Vera'a", "vro": "Võro", "vrs": "Varisi", "vrt": "Burmbar; Banam Bay", "vsi": "Moldova Sign Language", "vsl": "Venezuelan Sign Language", "vsv": "Valencian Sign Language; Llengua de signes valenciana", "vto": "Vitou", "vum": "Vumbu", "vun": "Vunjo", "vut": "Vute", "vwa": "Awa (China)", "wa": "Walloon", "waa": "Walla Walla", "wab": "Wab", "wac": "Wasco-Wishram", "wad": "Wamesa; Wondama", "wae": "Walser", "waf": "Wakoná", "wag": "Wa'ema", "wah": "Watubela", "wai": "Wares", "waj": "Waffa", "wak": "Wakashan languages", "wal": "Wolaytta; Wolaitta", "wam": "Wampanoag", "wan": "Wan", "wao": "Wappo", "wap": "Wapishana", "waq": "Wagiman", "war": "Waray (Philippines)", "was": "Washo", "wat": "Kaninuwa", "wau": "Waurá", "wav": "Waka", "waw": "Waiwai", "wax": "Watam; Marangis", "way": "Wayana", "waz": "Wampur", "wba": "Warao", "wbb": "Wabo", "wbe": "Waritai", "wbf": "Wara", "wbh": "Wanda", "wbi": "Vwanji", "wbj": "Alagwa", "wbk": "Waigali", "wbl": "Wakhi", "wbm": "Wa", "wbp": "Warlpiri", "wbq": 
"Waddar", "wbr": "Wagdi", "wbs": "West Bengal Sign Language", "wbt": "Warnman", "wbv": "Wajarri", "wbw": "Woi", "wca": "Yanomámi", "wci": "Waci Gbe", "wdd": "Wandji", "wdg": "Wadaginam", "wdj": "Wadjiginy", "wdk": "Wadikali", "wdt": "Wendat", "wdu": "Wadjigu", "wdy": "Wadjabangayi", "wea": "Wewaw", "wec": "Wè Western", "wed": "Wedau", "weg": "Wergaia", "weh": "Weh", "wei": "Kiunum", "wem": "Weme Gbe", "wen": "Sorbian languages", "weo": "Wemale", "wep": "Westphalien", "wer": "Weri", "wes": "Cameroon Pidgin", "wet": "Perai", "weu": "Rawngtu Chin", "wew": "Wejewa", "wfg": "Yafi; Zorop", "wga": "Wagaya", "wgb": "Wagawaga", "wgg": "Wangkangurru; Wangganguru", "wgi": "Wahgi", "wgo": "Waigeo", "wgu": "Wirangu", "wgy": "Warrgamay", "wha": "Sou Upaa; Manusela", "whg": "North Wahgi", "whk": "Wahau Kenyah", "whu": "Wahau Kayan", "wib": "Southern Toussian", "wic": "Wichita", "wie": "Wik-Epa", "wif": "Wik-Keyangan", "wig": "Wik Ngathan", "wih": "Wik-Me'anha", "wii": "Minidien", "wij": "Wik-Iiyanh", "wik": "Wikalkan", "wil": "Wilawila", "wim": "Wik-Mungkan", "win": "Ho-Chunk", "wir": "Wiraféd", "wiu": "Wiru", "wiv": "Vitu", "wiy": "Wiyot", "wja": "Waja", "wji": "Warji", "wka": "Kw'adza", "wkb": "Kumbaran", "wkd": "Wakde; Mo", "wkl": "Kalanadi", "wkr": "Keerray-Woorroong", "wku": "Kunduvadi", "wkw": "Wakawaka", "wky": "Wangkayutyuru", "wla": "Walio", "wlc": "Mwali Comorian", "wle": "Wolane", "wlg": "Kunbarlang", "wlh": "Welaun", "wli": "Waioli", "wlk": "Wailaki", "wll": "Wali (Sudan)", "wlm": "Middle Welsh", "wlo": "Wolio", "wlr": "Wailapa", "wls": "Wallisian", "wlu": "Wuliwuli", "wlv": "Wichí Lhamtés Vejoz", "wlw": "Walak", "wlx": "Wali (Ghana)", "wly": "Waling", "wma": "Mawa (Nigeria)", "wmb": "Wambaya", "wmc": "Wamas", "wmd": "Mamaindé", "wme": "Wambule", "wmg": "Western Minyag", "wmh": "Waima'a", "wmi": "Wamin", "wmm": "Maiwa (Indonesia)", "wmn": "Waamwang", "wmo": "Wom (Papua New Guinea)", "wms": "Wambon", "wmt": "Walmajarri", "wmw": "Mwani", "wmx": "Womo", "wnb": "Wanambre", "wnc": "Wantoat", "wnd": "Wandarang", "wne": "Waneci", "wng": "Wanggom", "wni": "Ndzwani Comorian", "wnk": "Wanukaka", "wnm": "Wanggamala", "wnn": "Wunumara", "wno": "Wano", "wnp": "Wanap", "wnu": "Usan", "wnw": "Wintu", "wny": "Wanyi; Waanyi", "wo": "Wolof", "woa": "Kuwema; Tyaraity", "wob": "Wè Northern", "woc": "Wogeo", "wod": "Wolani", "woe": "Woleaian", "wof": "Gambian Wolof", "wog": "Wogamusin", "woi": "Kamang", "wok": "Longto", "wom": "Wom (Nigeria)", "won": "Wongo", "woo": "Manombai", "wor": "Woria", "wos": "Hanga Hundi", "wow": "Wawonii", "woy": "Weyto", "wpc": "Maco", "wrb": "Waluwarra; Warluwara", "wrg": "Warungu; Gudjal", "wrh": "Wiradjuri", "wri": "Wariyangga", "wrk": "Garrwa", "wrl": "Warlmanpa", "wrm": "Warumungu", "wrn": "Warnang", "wro": "Worrorra", "wrp": "Waropen", "wrr": "Wardaman", "wrs": "Waris", "wru": "Waru", "wrv": "Waruna", "wrw": "Gugu Warra", "wrx": "Wae Rana", "wry": "Merwari", "wrz": "Waray (Australia)", "wsa": "Warembori", "wsg": "Adilabad Gondi", "wsi": "Wusi", "wsk": "Waskia", "wsr": "Owenia", "wss": "Wasa", "wsu": "Wasu", "wsv": "Wotapuri-Katarqalai", "wtf": "Watiwa", "wth": "Wathawurrung", "wti": "Berta", "wtk": "Watakataui", "wtm": "Mewati", "wtw": "Wotu", "wua": "Wikngenchera", "wub": "Wunambal", "wud": "Wudu", "wuh": "Wutunhua", "wul": "Silimo", "wum": "Wumbvu", "wun": "Bungu", "wur": "Wurrugu", "wut": "Wutung", "wuu": "Wu Chinese", "wuv": "Wuvulu-Aua", "wux": "Wulna", "wuy": "Wauyai", "wwa": "Waama", "wwb": "Wakabunga", "wwo": "Wetamut; Dorig", "wwr": "Warrwa", "www": "Wawa", "wxa": 
"Waxianghua", "wxw": "Wardandi", "wyb": "Wangaaybuwan-Ngiyambaa", "wyi": "Woiwurrung", "wym": "Wymysorys", "wyn": "Wyandot", "wyr": "Wayoró", "wyy": "Western Fijian", "xaa": "Andalusian Arabic", "xab": "Sambe", "xac": "Kachari", "xad": "Adai", "xae": "Aequian", "xag": "Aghwan", "xai": "Kaimbé", "xaj": "Ararandewára", "xak": "Máku", "xal": "Kalmyk; Oirat", "xam": "ǀXam", "xan": "Xamtanga", "xao": "Khao", "xap": "Apalachee", "xaq": "Aquitanian", "xar": "Karami", "xas": "Kamas", "xat": "Katawixi", "xau": "Kauwera", "xav": "Xavánte", "xaw": "Kawaiisu", "xay": "Kayan Mahakam", "xbb": "Lower Burdekin", "xbc": "Bactrian", "xbd": "Bindal", "xbe": "Bigambal", "xbg": "Bunganditj", "xbi": "Kombio", "xbj": "Birrpayi", "xbm": "Middle Breton", "xbn": "Kenaboi", "xbo": "Bolgarian", "xbp": "Bibbulman", "xbr": "Kambera", "xbw": "Kambiwá", "xby": "Batjala; Batyala", "xcb": "Cumbric", "xcc": "Camunic", "xce": "Celtiberian", "xcg": "Cisalpine Gaulish", "xch": "Chemakum; Chimakum", "xcl": "Classical Armenian", "xcm": "Comecrudo", "xcn": "Cotoname", "xco": "Chorasmian", "xcr": "Carian", "xct": "Classical Tibetan", "xcu": "Curonian", "xcv": "Chuvantsy", "xcw": "Coahuilteco", "xcy": "Cayuse", "xda": "Darkinyung", "xdc": "Dacian", "xdk": "Dharuk", "xdm": "Edomite", "xdo": "Kwandu", "xdq": "Kaitag", "xdy": "Malayic Dayak", "xeb": "Eblan", "xed": "Hdi", "xeg": "ǁXegwi", "xel": "Kelo", "xem": "Kembayan", "xep": "Epi-Olmec", "xer": "Xerénte", "xes": "Kesawai", "xet": "Xetá", "xeu": "Keoru-Ahia", "xfa": "Faliscan", "xga": "Galatian", "xgb": "Gbin", "xgd": "Gudang", "xgf": "Gabrielino-Fernandeño", "xgg": "Goreng", "xgi": "Garingbal", "xgl": "Galindan", "xgm": "Dharumbal; Guwinmal", "xgn": "Mongolian languages", "xgr": "Garza", "xgu": "Unggumi", "xgw": "Guwa", "xh": "Xhosa", "xha": "Harami", "xhc": "Hunnic", "xhd": "Hadrami", "xhe": "Khetrani", "xhm": "Middle Khmer (1400 to 1850 CE)", "xhr": "Hernican", "xht": "Hattic", "xhu": "Hurrian", "xhv": "Khua", "xib": "Iberian", "xii": "Xiri", "xil": "Illyrian", "xin": "Xinca", "xir": "Xiriâna", "xis": "Kisan", "xiv": "Indus Valley Language", "xiy": "Xipaya", "xjb": "Minjungbal", "xjt": "Jaitmatang", "xka": "Kalkoti", "xkb": "Northern Nago", "xkc": "Kho'ini", "xkd": "Mendalam Kayan", "xke": "Kereho", "xkf": "Khengkha", "xkg": "Kagoro", "xki": "Kenyan Sign Language", "xkj": "Kajali", "xkk": "Kachok; Kaco'", "xkl": "Mainstream Kenyah", "xkn": "Kayan River Kayan", "xko": "Kiorr", "xkp": "Kabatei", "xkq": "Koroni", "xkr": "Xakriabá", "xks": "Kumbewaha", "xkt": "Kantosi", "xku": "Kaamba", "xkv": "Kgalagadi", "xkw": "Kembra", "xkx": "Karore", "xky": "Uma' Lasan", "xkz": "Kurtokha", "xla": "Kamula", "xlb": "Loup B", "xlc": "Lycian", "xld": "Lydian", "xle": "Lemnian", "xlg": "Ligurian (Ancient)", "xli": "Liburnian", "xln": "Alanic", "xlo": "Loup A", "xlp": "Lepontic", "xls": "Lusitanian", "xlu": "Cuneiform Luwian", "xly": "Elymian", "xma": "Mushungulu", "xmb": "Mbonga", "xmc": "Makhuwa-Marrevone", "xmd": "Mbudum", "xme": "Median", "xmf": "Mingrelian", "xmg": "Mengaka", "xmh": "Kugu-Muminh", "xmj": "Majera", "xmk": "Ancient Macedonian", "xml": "Malaysian Sign Language", "xmm": "Manado Malay", "xmn": "Manichaean Middle Persian", "xmo": "Morerebi", "xmp": "Kuku-Mu'inh", "xmq": "Kuku-Mangk", "xmr": "Meroitic", "xms": "Moroccan Sign Language", "xmt": "Matbat", "xmu": "Kamu", "xmv": "Antankarana Malagasy; Tankarana Malagasy", "xmw": "Tsimihety Malagasy", "xmx": "Salawati; Maden", "xmy": "Mayaguduna", "xmz": "Mori Bawah", "xna": "Ancient North Arabian", "xnb": "Kanakanabu", "xnd": "Na-Dene 
languages", "xng": "Middle Mongolian", "xnh": "Kuanhua", "xni": "Ngarigu", "xnj": "Ngoni (Tanzania)", "xnk": "Nganakarti", "xnm": "Ngumbarl", "xnn": "Northern Kankanay", "xno": "Anglo-Norman", "xnq": "Ngoni (Mozambique)", "xnr": "Kangri", "xns": "Kanashi", "xnt": "Narragansett", "xnu": "Nukunul", "xny": "Nyiyaparli", "xnz": "Kenzi; Mattoki", "xoc": "O'chi'chi'", "xod": "Kokoda", "xog": "Soga", "xoi": "Kominimung", "xok": "Xokleng", "xom": "Komo (Sudan)", "xon": "Konkomba", "xoo": "Xukurú", "xop": "Kopar", "xor": "Korubo", "xow": "Kowaki", "xpa": "Pirriya", "xpb": "Northeastern Tasmanian; Pyemmairrener", "xpc": "Pecheneg", "xpd": "Oyster Bay Tasmanian", "xpe": "Liberia Kpelle", "xpf": "Southeast Tasmanian; Nuenonne", "xpg": "Phrygian", "xph": "North Midlands Tasmanian; Tyerrenoterpanner", "xpi": "Pictish", "xpj": "Mpalitjanh", "xpk": "Kulina Pano", "xpl": "Port Sorell Tasmanian", "xpm": "Pumpokol", "xpn": "Kapinawá", "xpo": "Pochutec", "xpp": "Puyo-Paekche", "xpq": "Mohegan-Pequot", "xpr": "Parthian", "xps": "Pisidian", "xpt": "Punthamara", "xpu": "Punic", "xpv": "Northern Tasmanian; Tommeginne", "xpw": "Northwestern Tasmanian; Peerapper", "xpx": "Southwestern Tasmanian; Toogee", "xpy": "Puyo", "xpz": "Bruny Island Tasmanian", "xqa": "Karakhanid", "xqt": "Qatabanian", "xra": "Krahô", "xrb": "Eastern Karaboro", "xrd": "Gundungurra", "xre": "Kreye", "xrg": "Minang", "xri": "Krikati-Timbira", "xrm": "Armazic", "xrn": "Arin", "xrr": "Raetic", "xrt": "Aranama-Tamique", "xru": "Marriammu", "xrw": "Karawa", "xsa": "Sabaean", "xsb": "Sambal", "xsc": "Scythian", "xsd": "Sidetic", "xse": "Sempan", "xsh": "Shamang", "xsi": "Sio", "xsj": "Subi", "xsl": "South Slavey", "xsm": "Kasem", "xsn": "Sanga (Nigeria)", "xso": "Solano", "xsp": "Silopi", "xsq": "Makhuwa-Saka", "xsr": "Sherpa", "xss": "Assan", "xsu": "Sanumá", "xsv": "Sudovian", "xsy": "Saisiyat", "xta": "Alcozauca Mixtec", "xtb": "Chazumba Mixtec", "xtc": "Katcha-Kadugli-Miri", "xtd": "Diuxi-Tilantongo Mixtec", "xte": "Ketengban", "xtg": "Transalpine Gaulish", "xth": "Yitha Yitha", "xti": "Sinicahua Mixtec", "xtj": "San Juan Teita Mixtec", "xtl": "Tijaltepec Mixtec", "xtm": "Magdalena Peñasco Mixtec", "xtn": "Northern Tlaxiaco Mixtec", "xto": "Tokharian A", "xtp": "San Miguel Piedras Mixtec", "xtq": "Tumshuqese", "xtr": "Early Tripuri", "xts": "Sindihui Mixtec", "xtt": "Tacahua Mixtec", "xtu": "Cuyamecalco Mixtec", "xtv": "Thawa", "xtw": "Tawandê", "xty": "Yoloxochitl Mixtec", "xua": "Alu Kurumba", "xub": "Betta Kurumba", "xud": "Umiida", "xug": "Kunigami", "xuj": "Jennu Kurumba", "xul": "Ngunawal; Nunukul", "xum": "Umbrian", "xun": "Unggaranggu", "xuo": "Kuo", "xup": "Upper Umpqua", "xur": "Urartian", "xut": "Kuthant", "xuu": "Kxoe; Khwedam", "xve": "Venetic", "xvi": "Kamviri", "xvn": "Vandalic", "xvo": "Volscian", "xvs": "Vestinian", "xwa": "Kwaza", "xwc": "Woccon", "xwd": "Wadi Wadi", "xwe": "Xwela Gbe", "xwg": "Kwegu", "xwj": "Wajuk", "xwk": "Wangkumara", "xwl": "Western Xwla Gbe", "xwo": "Written Oirat", "xwr": "Kwerba Mamberamo", "xwt": "Wotjobaluk", "xww": "Wemba Wemba", "xxb": "Boro (Ghana)", "xxk": "Ke'o", "xxm": "Minkin", "xxr": "Koropó", "xxt": "Tambora", "xya": "Yaygir", "xyb": "Yandjibara", "xyj": "Mayi-Yapi", "xyk": "Mayi-Kulan", "xyl": "Yalakalore", "xyt": "Mayi-Thakurti", "xyy": "Yorta Yorta", "xzh": "Zhang-Zhung", "xzm": "Zemgalian", "xzp": "Ancient Zapotec", "yaa": "Yaminahua", "yab": "Yuhup", "yac": "Pass Valley Yali", "yad": "Yagua", "yae": "Pumé", "yaf": "Yaka (Democratic Republic of Congo)", "yag": "Yámana", "yah": 
"Yazgulyam", "yai": "Yagnobi", "yaj": "Banda-Yangere", "yak": "Yakama", "yal": "Yalunka", "yam": "Yamba", "yan": "Mayangna", "yao": "Yao", "yap": "Yapese", "yaq": "Yaqui", "yar": "Yabarana", "yas": "Nugunu (Cameroon)", "yat": "Yambeta", "yau": "Yuwana", "yav": "Yangben", "yaw": "Yawalapití", "yax": "Yauma", "yay": "Agwagwune", "yaz": "Lokaa", "yba": "Yala", "ybb": "Yemba", "ybe": "West Yugur", "ybh": "Yakha", "ybi": "Yamphu", "ybj": "Hasha", "ybk": "Bokha", "ybl": "Yukuben", "ybm": "Yaben", "ybn": "Yabaâna", "ybo": "Yabong", "ybx": "Yawiyo", "yby": "Yaweyuha", "ych": "Chesu", "ycl": "Lolopo", "ycn": "Yucuna", "ycp": "Chepya", "yda": "Yanda", "ydd": "Eastern Yiddish", "yde": "Yangum Dey", "ydg": "Yidgha", "ydk": "Yoidik", "yea": "Ravula", "yec": "Yeniche", "yee": "Yimas", "yei": "Yeni", "yej": "Yevanic", "yel": "Yela", "yer": "Tarok", "yes": "Nyankpa", "yet": "Yetfa", "yeu": "Yerukula", "yev": "Yapunda", "yey": "Yeyi", "yga": "Malyangapa", "ygi": "Yiningayi", "ygl": "Yangum Gel", "ygm": "Yagomi", "ygp": "Gepo", "ygr": "Yagaria", "ygs": "Yolŋu Sign Language", "ygu": "Yugul", "ygw": "Yagwoia", "yha": "Baha Buyang", "yhd": "Judeo-Iraqi Arabic", "yhl": "Hlepho Phowa", "yhs": "Yan-nhaŋu Sign Language", "yi": "Yiddish", "yia": "Yinggarda", "yif": "Ache", "yig": "Wusa Nasu", "yih": "Western Yiddish", "yii": "Yidiny", "yij": "Yindjibarndi", "yik": "Dongshanba Lalo", "yil": "Yindjilandji", "yim": "Yimchungru Naga", "yin": "Riang Lai; Yinchia", "yip": "Pholo", "yiq": "Miqie", "yir": "North Awyu", "yis": "Yis", "yit": "Eastern Lalu", "yiu": "Awu", "yiv": "Northern Nisu", "yix": "Axi Yi", "yiz": "Azhe", "yka": "Yakan", "ykg": "Northern Yukaghir", "yki": "Yoke", "ykk": "Yakaikeke", "ykl": "Khlula", "ykm": "Kap", "ykn": "Kua-nsi", "yko": "Yasa", "ykr": "Yekora", "ykt": "Kathu", "yku": "Kuamasi", "yky": "Yakoma", "yla": "Yaul", "ylb": "Yaleba", "yle": "Yele", "ylg": "Yelogu", "yli": "Angguruk Yali", "yll": "Yil", "ylm": "Limi", "yln": "Langnian Buyang", "ylo": "Naluo Yi", "ylr": "Yalarnnga", "ylu": "Aribwaung", "yly": "Nyâlayu; Nyelâyu", "ymb": "Yambes", "ymc": "Southern Muji", "ymd": "Muda", "yme": "Yameo", "ymg": "Yamongeri", "ymh": "Mili", "ymi": "Moji", "ymk": "Makwe", "yml": "Iamalele", "ymm": "Maay", "ymn": "Yamna; Sunum", "ymo": "Yangum Mon", "ymp": "Yamap", "ymq": "Qila Muji", "ymr": "Malasar", "yms": "Mysian", "ymx": "Northern Muji", "ymz": "Muzi", "yna": "Aluo", "ynd": "Yandruwandha", "yne": "Lang'e", "yng": "Yango", "ynk": "Naukan Yupik", "ynl": "Yangulam", "ynn": "Yana", "yno": "Yong", "ynq": "Yendang", "yns": "Yansi", "ynu": "Yahuna", "yo": "Yoruba", "yob": "Yoba", "yog": "Yogad", "yoi": "Yonaguni", "yok": "Yokuts", "yol": "Yola", "yom": "Yombe", "yon": "Yongkom", "yot": "Yotti", "yox": "Yoron", "yoy": "Yoy", "ypa": "Phala", "ypb": "Labo Phowa", "ypg": "Phola", "yph": "Phupha", "ypk": "Yupik languages", "ypm": "Phuma", "ypn": "Ani Phowa", "ypo": "Alo Phola", "ypp": "Phupa", "ypz": "Phuza", "yra": "Yerakai", "yrb": "Yareba", "yre": "Yaouré", "yrk": "Nenets", "yrl": "Nhengatu", "yrm": "Yirrk-Mel", "yrn": "Yerong", "yro": "Yaroamë", "yrs": "Yarsun", "yrw": "Yarawata", "yry": "Yarluyandi", "ysc": "Yassic", "ysd": "Samatao", "ysg": "Sonaga", "ysl": "Yugoslavian Sign Language", "ysm": "Myanmar Sign Language", "ysn": "Sani", "yso": "Nisi (China)", "ysp": "Southern Lolopo", "ysr": "Sirenik Yupik", "yss": "Yessan-Mayo", "ysy": "Sanie", "yta": "Talu", "ytl": "Tanglang", "ytp": "Thopho", "ytw": "Yout Wam", "yty": "Yatay", "yua": "Yucateco; Yucatec Maya", "yub": "Yugambal", "yuc": "Yuchi", "yud": 
"Judeo-Tripolitanian Arabic", "yue": "Yue Chinese; Cantonese", "yuf": "Havasupai-Walapai-Yavapai", "yug": "Yug", "yui": "Yurutí", "yuj": "Karkar-Yuri", "yuk": "Yuki", "yul": "Yulu", "yum": "Quechan", "yun": "Bena (Nigeria)", "yup": "Yukpa", "yuq": "Yuqui", "yur": "Yurok", "yut": "Yopno", "yuw": "Yau (Morobe Province)", "yux": "Southern Yukaghir", "yuy": "East Yugur", "yuz": "Yuracare", "yva": "Yawa", "yvt": "Yavitero", "ywa": "Kalou", "ywg": "Yinhawangka", "ywl": "Western Lalu", "ywn": "Yawanawa", "ywq": "Wuding-Luquan Yi", "ywr": "Yawuru", "ywt": "Xishanba Lalo; Central Lalo", "ywu": "Wumeng Nasu", "yww": "Yawarawarga", "yxa": "Mayawali", "yxg": "Yagara", "yxl": "Yardliyawarra", "yxm": "Yinwum", "yxu": "Yuyu", "yxy": "Yabula Yabula", "yyr": "Yir Yoront", "yyu": "Yau (Sandaun Province)", "yyz": "Ayizi", "yzg": "E'ma Buyang", "yzk": "Zokhuo", "za": "Zhuang; Chuang", "zaa": "Sierra de Juárez Zapotec", "zab": "Western Tlacolula Valley Zapotec; San Juan Guelavía Zapotec", "zac": "Ocotlán Zapotec", "zad": "Cajonos Zapotec", "zae": "Yareni Zapotec", "zaf": "Ayoquesco Zapotec", "zag": "Zaghawa", "zah": "Zangwal", "zai": "Isthmus Zapotec", "zaj": "Zaramo", "zak": "Zanaki", "zal": "Zauzou", "zam": "Miahuatlán Zapotec", "zao": "Ozolotepec Zapotec", "zap": "Zapotec", "zaq": "Aloápam Zapotec", "zar": "Rincón Zapotec", "zas": "Santo Domingo Albarradas Zapotec", "zat": "Tabaa Zapotec", "zau": "Zangskari", "zav": "Yatzachi Zapotec", "zaw": "Mitla Zapotec", "zax": "Xadani Zapotec", "zay": "Zayse-Zergulla; Zaysete", "zaz": "Zari", "zba": "Balaibalan", "zbc": "Central Berawan", "zbe": "East Berawan", "zbl": "Blissymbols; Bliss; Blissymbolics", "zbt": "Batui", "zbu": "Bu (Bauchi State)", "zbw": "West Berawan", "zca": "Coatecas Altas Zapotec", "zcd": "Las Delicias Zapotec", "zch": "Central Hongshuihe Zhuang", "zdj": "Ngazidja Comorian", "zea": "Zeeuws", "zeg": "Zenag", "zeh": "Eastern Hongshuihe Zhuang", "zen": "Zenaga", "zga": "Kinga", "zgb": "Guibei Zhuang", "zgh": "Standard Moroccan Tamazight", "zgm": "Minz Zhuang", "zgn": "Guibian Zhuang", "zgr": "Magori", "zh": "Chinese", "zhb": "Zhaba", "zhd": "Dai Zhuang", "zhi": "Zhire", "zhn": "Nong Zhuang", "zhw": "Zhoa", "zhx": "Chinese (family)", "zia": "Zia", "zib": "Zimbabwe Sign Language", "zik": "Zimakani", "zil": "Zialo", "zim": "Mesme", "zin": "Zinza", "ziw": "Zigula", "ziz": "Zizilivakan", "zka": "Kaimbulawa", "zkb": "Koibal", "zkd": "Kadu", "zkg": "Koguryo", "zkh": "Khorezmian", "zkk": "Karankawa", "zkn": "Kanan", "zko": "Kott", "zkp": "São Paulo Kaingáng", "zkr": "Zakhring", "zkt": "Kitan", "zku": "Kaurna", "zkv": "Krevinian", "zkz": "Khazar", "zla": "Zula", "zle": "East Slavic languages", "zlj": "Liujiang Zhuang", "zlm": "Malay (individual language)", "zln": "Lianshan Zhuang", "zlq": "Liuqian Zhuang", "zls": "South Slavic languages", "zlw": "West Slavic languages", "zma": "Manda (Australia)", "zmb": "Zimba", "zmc": "Margany", "zmd": "Maridan", "zme": "Mangerr", "zmf": "Mfinu", "zmg": "Marti Ke", "zmh": "Makolkol", "zmi": "Negeri Sembilan Malay", "zmj": "Maridjabin", "zmk": "Mandandanyi", "zml": "Matngala", "zmm": "Marimanindji; Marramaninyshi", "zmn": "Mbangwe", "zmo": "Molo", "zmp": "Mpuono", "zmq": "Mituku", "zmr": "Maranunggu", "zms": "Mbesa", "zmt": "Maringarr", "zmu": "Muruwari", "zmv": "Mbariman-Gudhinma", "zmw": "Mbo (Democratic Republic of Congo)", "zmx": "Bomitaba", "zmy": "Mariyedi", "zmz": "Mbandja", "zna": "Zan Gula", "znd": "Zande languages", "zne": "Zande (individual language)", "zng": "Mang", "znk": "Manangkari", "zns": "Mangas", "zoc": 
"Copainalá Zoque", "zoh": "Chimalapa Zoque", "zom": "Zou", "zoo": "Asunción Mixtepec Zapotec", "zoq": "Tabasco Zoque", "zor": "Rayón Zoque", "zos": "Francisco León Zoque", "zpa": "Lachiguiri Zapotec", "zpb": "Yautepec Zapotec", "zpc": "Choapan Zapotec", "zpd": "Southeastern Ixtlán Zapotec", "zpe": "Petapa Zapotec", "zpf": "San Pedro Quiatoni Zapotec", "zpg": "Guevea De Humboldt Zapotec", "zph": "Totomachapan Zapotec", "zpi": "Santa María Quiegolani Zapotec", "zpj": "Quiavicuzas Zapotec", "zpk": "Tlacolulita Zapotec", "zpl": "Lachixío Zapotec", "zpm": "Mixtepec Zapotec", "zpn": "Santa Inés Yatzechi Zapotec", "zpo": "Amatlán Zapotec", "zpp": "El Alto Zapotec", "zpq": "Zoogocho Zapotec", "zpr": "Santiago Xanica Zapotec", "zps": "Coatlán Zapotec", "zpt": "San Vicente Coatlán Zapotec", "zpu": "Yalálag Zapotec", "zpv": "Chichicapan Zapotec", "zpw": "Zaniza Zapotec", "zpx": "San Baltazar Loxicha Zapotec", "zpy": "Mazaltepec Zapotec", "zpz": "Texmelucan Zapotec", "zqe": "Qiubei Zhuang", "zra": "Kara (Korea)", "zrg": "Mirgan", "zrn": "Zerenkel", "zro": "Záparo", "zrp": "Zarphatic", "zrs": "Mairasi", "zsa": "Sarasira", "zsk": "Kaskean", "zsl": "Zambian Sign Language", "zsm": "Standard Malay", "zsr": "Southern Rincon Zapotec", "zsu": "Sukurum", "zte": "Elotepec Zapotec", "ztg": "Xanaguía Zapotec", "ztl": "Lapaguía-Guivini Zapotec", "ztm": "San Agustín Mixtepec Zapotec", "ztn": "Santa Catarina Albarradas Zapotec", "ztp": "Loxicha Zapotec", "ztq": "Quioquitani-Quierí Zapotec", "zts": "Tilquiapan Zapotec", "ztt": "Tejalapan Zapotec", "ztu": "Güilá Zapotec", "ztx": "Zaachila Zapotec", "zty": "Yatee Zapotec", "zu": "Zulu", "zua": "Zeem", "zuh": "Tokano", "zum": "Kumzari", "zun": "Zuni", "zuy": "Zumaya", "zwa": "Zay", "zyb": "Yongbei Zhuang", "zyg": "Yang Zhuang", "zyj": "Youjiang Zhuang", "zyn": "Yongnan Zhuang", "zyp": "Zyphe Chin", "zza": "Zaza; Dimili; Dimli (macrolanguage); Kirdki; Kirmanjki (macrolanguage); Zazaki", "zzj": "Zuojiang Zhuang" }
datasets/src/datasets/utils/resources/languages.json/0
{ "file_path": "datasets/src/datasets/utils/resources/languages.json", "repo_id": "datasets", "token_count": 111198 }
182
import os
import tarfile

import pyarrow as pa
import pytest

from datasets import Dataset, concatenate_datasets, load_dataset
from datasets.features import Audio, Features, Sequence, Value

from ..utils import (
    require_sndfile,
)


@pytest.fixture()
def tar_wav_path(shared_datadir, tmp_path_factory):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    path = tmp_path_factory.mktemp("data") / "audio_data.wav.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(audio_path, arcname=os.path.basename(audio_path))
    return path


@pytest.fixture()
def tar_mp3_path(shared_datadir, tmp_path_factory):
    audio_path = str(shared_datadir / "test_audio_44100.mp3")
    path = tmp_path_factory.mktemp("data") / "audio_data.mp3.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(audio_path, arcname=os.path.basename(audio_path))
    return path


def iter_archive(archive_path):
    with tarfile.open(archive_path) as tar:
        for tarinfo in tar:
            file_path = tarinfo.name
            file_obj = tar.extractfile(tarinfo)
            yield file_path, file_obj


def test_audio_instantiation():
    audio = Audio()
    assert audio.sampling_rate is None
    assert audio.mono is True
    assert audio.id is None
    assert audio.dtype == "dict"
    assert audio.pa_type == pa.struct({"bytes": pa.binary(), "path": pa.string()})
    assert audio._type == "Audio"


def test_audio_feature_type_to_arrow():
    features = Features({"audio": Audio()})
    assert features.arrow_schema == pa.schema({"audio": Audio().pa_type})
    features = Features({"struct_containing_an_audio": {"audio": Audio()}})
    assert features.arrow_schema == pa.schema({"struct_containing_an_audio": pa.struct({"audio": Audio().pa_type})})
    features = Features({"sequence_of_audios": Sequence(Audio())})
    assert features.arrow_schema == pa.schema({"sequence_of_audios": pa.list_(Audio().pa_type)})


@pytest.mark.parametrize(
    "build_example",
    [
        lambda audio_path: audio_path,
        lambda audio_path: open(audio_path, "rb").read(),
        lambda audio_path: {"path": audio_path},
        lambda audio_path: {"path": audio_path, "bytes": None},
        lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read()},
        lambda audio_path: {"path": None, "bytes": open(audio_path, "rb").read()},
        lambda audio_path: {"bytes": open(audio_path, "rb").read()},
        lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
    ],
)
def test_audio_feature_encode_example(shared_datadir, build_example):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    audio = Audio()
    encoded_example = audio.encode_example(build_example(audio_path))
    assert isinstance(encoded_example, dict)
    assert encoded_example.keys() == {"bytes", "path"}
    assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
    decoded_example = audio.decode_example(encoded_example)
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}


@pytest.mark.parametrize(
    "build_example",
    [
        lambda audio_path: {"path": audio_path, "sampling_rate": 16_000},
        lambda audio_path: {"path": audio_path, "bytes": None, "sampling_rate": 16_000},
        lambda audio_path: {"path": audio_path, "bytes": open(audio_path, "rb").read(), "sampling_rate": 16_000},
        lambda audio_path: {"array": [0.1, 0.2, 0.3], "sampling_rate": 16_000},
    ],
)
def test_audio_feature_encode_example_pcm(shared_datadir, build_example):
    audio_path = str(shared_datadir / "test_audio_16000.pcm")
    audio = Audio(sampling_rate=16_000)
    encoded_example = audio.encode_example(build_example(audio_path))
    assert isinstance(encoded_example, dict)
    assert encoded_example.keys() == {"bytes", "path"}
    assert encoded_example["bytes"] is not None or encoded_example["path"] is not None
    decoded_example = audio.decode_example(encoded_example)
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}


@require_sndfile
def test_audio_decode_example(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    audio = Audio()
    decoded_example = audio.decode_example(audio.encode_example(audio_path))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path
    assert decoded_example["array"].shape == (202311,)
    assert decoded_example["sampling_rate"] == 44100
    with pytest.raises(RuntimeError):
        Audio(decode=False).decode_example(audio_path)


@require_sndfile
def test_audio_resampling(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    audio = Audio(sampling_rate=16000)
    decoded_example = audio.decode_example(audio.encode_example(audio_path))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path
    assert decoded_example["array"].shape == (73401,)
    assert decoded_example["sampling_rate"] == 16000


@require_sndfile
def test_audio_decode_example_mp3(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.mp3")
    audio = Audio()
    decoded_example = audio.decode_example(audio.encode_example(audio_path))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path
    assert decoded_example["array"].shape == (110592,)
    assert decoded_example["sampling_rate"] == 44100


@require_sndfile
def test_audio_decode_example_opus(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_48000.opus")
    audio = Audio()
    decoded_example = audio.decode_example(audio.encode_example(audio_path))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path
    assert decoded_example["array"].shape == (48000,)
    assert decoded_example["sampling_rate"] == 48000


@pytest.mark.parametrize("sampling_rate", [16_000, 48_000])
def test_audio_decode_example_pcm(shared_datadir, sampling_rate):
    audio_path = str(shared_datadir / "test_audio_16000.pcm")
    audio_input = {"path": audio_path, "sampling_rate": 16_000}
    audio = Audio(sampling_rate=sampling_rate)
    decoded_example = audio.decode_example(audio.encode_example(audio_input))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] is None
    assert decoded_example["array"].shape == (16208 * sampling_rate // 16_000,)
    assert decoded_example["sampling_rate"] == sampling_rate


@require_sndfile
def test_audio_resampling_mp3_different_sampling_rates(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.mp3")
    audio_path2 = str(shared_datadir / "test_audio_16000.mp3")
    audio = Audio(sampling_rate=48000)
    decoded_example = audio.decode_example(audio.encode_example(audio_path))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path
    assert decoded_example["array"].shape == (120373,)
    assert decoded_example["sampling_rate"] == 48000
    decoded_example = audio.decode_example(audio.encode_example(audio_path2))
    assert decoded_example.keys() == {"path", "array", "sampling_rate"}
    assert decoded_example["path"] == audio_path2
    assert decoded_example["array"].shape == (122688,)
    assert decoded_example["sampling_rate"] == 48000


@require_sndfile
def test_dataset_with_audio_feature(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (202311,)
    assert item["audio"]["sampling_rate"] == 44100
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_path
    assert batch["audio"][0]["array"].shape == (202311,)
    assert batch["audio"][0]["sampling_rate"] == 44100
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_path
    assert column[0]["array"].shape == (202311,)
    assert column[0]["sampling_rate"] == 44100


@require_sndfile
def test_dataset_with_audio_feature_tar_wav(tar_wav_path):
    audio_filename = "test_audio_44100.wav"
    data = {"audio": []}
    for file_path, file_obj in iter_archive(tar_wav_path):
        data["audio"].append({"path": file_path, "bytes": file_obj.read()})
        break
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_filename
    assert item["audio"]["array"].shape == (202311,)
    assert item["audio"]["sampling_rate"] == 44100
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_filename
    assert batch["audio"][0]["array"].shape == (202311,)
    assert batch["audio"][0]["sampling_rate"] == 44100
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_filename
    assert column[0]["array"].shape == (202311,)
    assert column[0]["sampling_rate"] == 44100


@require_sndfile
def test_dataset_with_audio_feature_tar_mp3(tar_mp3_path):
    audio_filename = "test_audio_44100.mp3"
    data = {"audio": []}
    for file_path, file_obj in iter_archive(tar_mp3_path):
        data["audio"].append({"path": file_path, "bytes": file_obj.read()})
        break
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_filename
    assert item["audio"]["array"].shape == (110592,)
    assert item["audio"]["sampling_rate"] == 44100
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_filename
    assert batch["audio"][0]["array"].shape == (110592,)
    assert batch["audio"][0]["sampling_rate"] == 44100
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_filename
    assert column[0]["array"].shape == (110592,)
    assert column[0]["sampling_rate"] == 44100


@require_sndfile
def test_dataset_with_audio_feature_with_none():
    data = {"audio": [None]}
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"] is None
    batch = dset[:1]
    assert len(batch) == 1
    assert batch.keys() == {"audio"}
    assert isinstance(batch["audio"], list) and all(item is None for item in batch["audio"])
    column = dset["audio"]
    assert len(column) == 1
    assert isinstance(column, list) and all(item is None for item in column)

    # nested tests
    data = {"audio": [[None]]}
    features = Features({"audio": Sequence(Audio())})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert all(i is None for i in item["audio"])

    data = {"nested": [{"audio": None}]}
    features = Features({"nested": {"audio": Audio()}})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"nested"}
    assert item["nested"].keys() == {"audio"}
    assert item["nested"]["audio"] is None


@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio(sampling_rate=16000)})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (73401,)
    assert item["audio"]["sampling_rate"] == 16000
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_path
    assert batch["audio"][0]["array"].shape == (73401,)
    assert batch["audio"][0]["sampling_rate"] == 16000
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_path
    assert column[0]["array"].shape == (73401,)
    assert column[0]["sampling_rate"] == 16000


@require_sndfile
def test_resampling_at_loading_dataset_with_audio_feature_mp3(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.mp3")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio(sampling_rate=16000)})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (40125,)
    assert item["audio"]["sampling_rate"] == 16000
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_path
    assert batch["audio"][0]["array"].shape == (40125,)
    assert batch["audio"][0]["sampling_rate"] == 16000
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_path
    assert column[0]["array"].shape == (40125,)
    assert column[0]["sampling_rate"] == 16000


@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item["audio"]["sampling_rate"] == 44100
    dset = dset.cast_column("audio", Audio(sampling_rate=16000))
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (73401,)
    assert item["audio"]["sampling_rate"] == 16000
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_path
    assert batch["audio"][0]["array"].shape == (73401,)
    assert batch["audio"][0]["sampling_rate"] == 16000
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_path
    assert column[0]["array"].shape == (73401,)
    assert column[0]["sampling_rate"] == 16000


@require_sndfile
def test_resampling_after_loading_dataset_with_audio_feature_mp3(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.mp3")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item["audio"]["sampling_rate"] == 44100
    dset = dset.cast_column("audio", Audio(sampling_rate=16000))
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (40125,)
    assert item["audio"]["sampling_rate"] == 16000
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
    assert batch["audio"][0]["path"] == audio_path
    assert batch["audio"][0]["array"].shape == (40125,)
    assert batch["audio"][0]["sampling_rate"] == 16000
    column = dset["audio"]
    assert len(column) == 1
    assert column[0].keys() == {"path", "array", "sampling_rate"}
    assert column[0]["path"] == audio_path
    assert column[0]["array"].shape == (40125,)
    assert column[0]["sampling_rate"] == 16000


@pytest.mark.parametrize(
    "build_data",
    [
        lambda audio_path: {"audio": [audio_path]},
        lambda audio_path: {"audio": [open(audio_path, "rb").read()]},
        lambda audio_path: {"audio": [{"path": audio_path}]},
        lambda audio_path: {"audio": [{"path": audio_path, "bytes": None}]},
        lambda audio_path: {"audio": [{"path": audio_path, "bytes": open(audio_path, "rb").read()}]},
        lambda audio_path: {"audio": [{"path": None, "bytes": open(audio_path, "rb").read()}]},
        lambda audio_path: {"audio": [{"bytes": open(audio_path, "rb").read()}]},
    ],
)
def test_dataset_cast_to_audio_features(shared_datadir, build_data):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = build_data(audio_path)
    dset = Dataset.from_dict(data)
    item = dset.cast(Features({"audio": Audio()}))[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    item = dset.cast_column("audio", Audio())[0]
    assert item.keys() == {"audio"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}


def test_dataset_concatenate_audio_features(shared_datadir):
    # we use a different data structure between 1 and 2 to make sure they are compatible with each other
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data1 = {"audio": [audio_path]}
    dset1 = Dataset.from_dict(data1, features=Features({"audio": Audio()}))
    data2 = {"audio": [{"bytes": open(audio_path, "rb").read()}]}
    dset2 = Dataset.from_dict(data2, features=Features({"audio": Audio()}))
    concatenated_dataset = concatenate_datasets([dset1, dset2])
    assert len(concatenated_dataset) == len(dset1) + len(dset2)
    assert concatenated_dataset[0]["audio"]["array"].shape == dset1[0]["audio"]["array"].shape
    assert concatenated_dataset[1]["audio"]["array"].shape == dset2[0]["audio"]["array"].shape


def test_dataset_concatenate_nested_audio_features(shared_datadir):
    # we use a different data structure between 1 and 2 to make sure they are compatible with each other
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    features = Features({"list_of_structs_of_audios": [{"audio": Audio()}]})
    data1 = {"list_of_structs_of_audios": [[{"audio": audio_path}]]}
    dset1 = Dataset.from_dict(data1, features=features)
    data2 = {"list_of_structs_of_audios": [[{"audio": {"bytes": open(audio_path, "rb").read()}}]]}
    dset2 = Dataset.from_dict(data2, features=features)
    concatenated_dataset = concatenate_datasets([dset1, dset2])
    assert len(concatenated_dataset) == len(dset1) + len(dset2)
    assert (
        concatenated_dataset[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
        == dset1[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
    )
    assert (
        concatenated_dataset[1]["list_of_structs_of_audios"][0]["audio"]["array"].shape
        == dset2[0]["list_of_structs_of_audios"][0]["audio"]["array"].shape
    )


@require_sndfile
def test_dataset_with_audio_feature_map_is_not_decoded(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path], "text": ["Hello"]}
    features = Features({"audio": Audio(), "text": Value("string")})
    dset = Dataset.from_dict(data, features=features)
    expected_audio = features.encode_batch(data)["audio"][0]
    for item in dset.cast_column("audio", Audio(decode=False)):
        assert item.keys() == {"audio", "text"}
        assert item == {"audio": expected_audio, "text": "Hello"}

    def process_text(example):
        example["text"] = example["text"] + " World!"
        return example

    processed_dset = dset.map(process_text)
    for item in processed_dset.cast_column("audio", Audio(decode=False)):
        assert item.keys() == {"audio", "text"}
        assert item == {"audio": expected_audio, "text": "Hello World!"}


@require_sndfile
def test_dataset_with_audio_feature_map_is_decoded(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path], "text": ["Hello"]}
    features = Features({"audio": Audio(), "text": Value("string")})
    dset = Dataset.from_dict(data, features=features)

    def process_audio_sampling_rate_by_example(example):
        example["double_sampling_rate"] = 2 * example["audio"]["sampling_rate"]
        return example

    decoded_dset = dset.map(process_audio_sampling_rate_by_example)
    for item in decoded_dset.cast_column("audio", Audio(decode=False)):
        assert item.keys() == {"audio", "text", "double_sampling_rate"}
        assert item["double_sampling_rate"] == 88200

    def process_audio_sampling_rate_by_batch(batch):
        double_sampling_rates = []
        for audio in batch["audio"]:
            double_sampling_rates.append(2 * audio["sampling_rate"])
        batch["double_sampling_rate"] = double_sampling_rates
        return batch

    decoded_dset = dset.map(process_audio_sampling_rate_by_batch, batched=True)
    for item in decoded_dset.cast_column("audio", Audio(decode=False)):
        assert item.keys() == {"audio", "text", "double_sampling_rate"}
        assert item["double_sampling_rate"] == 88200


@require_sndfile
def test_formatted_dataset_with_audio_feature(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path, audio_path]}
    features = Features({"audio": Audio()})
    dset = Dataset.from_dict(data, features=features)
    with dset.formatted_as("numpy"):
        item = dset[0]
        assert item.keys() == {"audio"}
        assert item["audio"].keys() == {"path", "array", "sampling_rate"}
        assert item["audio"]["path"] == audio_path
        assert item["audio"]["array"].shape == (202311,)
        assert item["audio"]["sampling_rate"] == 44100
        batch = dset[:1]
        assert batch.keys() == {"audio"}
        assert len(batch["audio"]) == 1
        assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
        assert batch["audio"][0]["path"] == audio_path
        assert batch["audio"][0]["array"].shape == (202311,)
        assert batch["audio"][0]["sampling_rate"] == 44100
        column = dset["audio"]
        assert len(column) == 2
        assert column[0].keys() == {"path", "array", "sampling_rate"}
        assert column[0]["path"] == audio_path
        assert column[0]["array"].shape == (202311,)
        assert column[0]["sampling_rate"] == 44100

    with dset.formatted_as("pandas"):
        item = dset[0]
        assert item.shape == (1, 1)
        assert item.columns == ["audio"]
        assert item["audio"][0].keys() == {"path", "array", "sampling_rate"}
        assert item["audio"][0]["path"] == audio_path
        assert item["audio"][0]["array"].shape == (202311,)
        assert item["audio"][0]["sampling_rate"] == 44100
        batch = dset[:1]
        assert batch.shape == (1, 1)
        assert batch.columns == ["audio"]
        assert batch["audio"][0].keys() == {"path", "array", "sampling_rate"}
        assert batch["audio"][0]["path"] == audio_path
        assert batch["audio"][0]["array"].shape == (202311,)
        assert batch["audio"][0]["sampling_rate"] == 44100
        column = dset["audio"]
        assert len(column) == 2
        assert column[0].keys() == {"path", "array", "sampling_rate"}
        assert column[0]["path"] == audio_path
        assert column[0]["array"].shape == (202311,)
        assert column[0]["sampling_rate"] == 44100


@pytest.fixture
def jsonl_audio_dataset_path(shared_datadir, tmp_path_factory):
    import json

    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = [{"audio": audio_path, "text": "Hello world!"}]
    path = str(tmp_path_factory.mktemp("data") / "audio_dataset.jsonl")
    with open(path, "w") as f:
        for item in data:
            f.write(json.dumps(item) + "\n")
    return path


@require_sndfile
@pytest.mark.parametrize("streaming", [False, True])
def test_load_dataset_with_audio_feature(streaming, jsonl_audio_dataset_path, shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data_files = jsonl_audio_dataset_path
    features = Features({"audio": Audio(), "text": Value("string")})
    dset = load_dataset("json", split="train", data_files=data_files, features=features, streaming=streaming)
    item = dset[0] if not streaming else next(iter(dset))
    assert item.keys() == {"audio", "text"}
    assert item["audio"].keys() == {"path", "array", "sampling_rate"}
    assert item["audio"]["path"] == audio_path
    assert item["audio"]["array"].shape == (202311,)
    assert item["audio"]["sampling_rate"] == 44100


@require_sndfile
@pytest.mark.integration
def test_dataset_with_audio_feature_loaded_from_cache():
    # load first time
    ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean")
    # load from cache
    ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
    assert isinstance(ds, Dataset)


def test_dataset_with_audio_feature_undecoded(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio(decode=False)})
    dset = Dataset.from_dict(data, features=features)
    item = dset[0]
    assert item.keys() == {"audio"}
    assert item["audio"] == {"path": audio_path, "bytes": None}
    batch = dset[:1]
    assert batch.keys() == {"audio"}
    assert len(batch["audio"]) == 1
    assert batch["audio"][0] == {"path": audio_path, "bytes": None}
    column = dset["audio"]
    assert len(column) == 1
    assert column[0] == {"path": audio_path, "bytes": None}


def test_formatted_dataset_with_audio_feature_undecoded(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio(decode=False)})
    dset = Dataset.from_dict(data, features=features)
    with dset.formatted_as("numpy"):
        item = dset[0]
        assert item.keys() == {"audio"}
        assert item["audio"] == {"path": audio_path, "bytes": None}
        batch = dset[:1]
        assert batch.keys() == {"audio"}
        assert len(batch["audio"]) == 1
        assert batch["audio"][0] == {"path": audio_path, "bytes": None}
        column = dset["audio"]
        assert len(column) == 1
        assert column[0] == {"path": audio_path, "bytes": None}

    with dset.formatted_as("pandas"):
        item = dset[0]
        assert item.shape == (1, 1)
        assert item.columns == ["audio"]
        assert item["audio"][0] == {"path": audio_path, "bytes": None}
        batch = dset[:1]
        assert batch.shape == (1, 1)
        assert batch.columns == ["audio"]
        assert batch["audio"][0] == {"path": audio_path, "bytes": None}
        column = dset["audio"]
        assert len(column) == 1
        assert column[0] == {"path": audio_path, "bytes": None}


def test_dataset_with_audio_feature_map_undecoded(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    data = {"audio": [audio_path]}
    features = Features({"audio": Audio(decode=False)})
    dset = Dataset.from_dict(data, features=features)

    def assert_audio_example_undecoded(example):
        assert example["audio"] == {"path": audio_path, "bytes": None}

    dset.map(assert_audio_example_undecoded)

    def assert_audio_batch_undecoded(batch):
        for audio in batch["audio"]:
            assert audio == {"path": audio_path, "bytes": None}

    dset.map(assert_audio_batch_undecoded, batched=True)


def test_audio_embed_storage(shared_datadir):
    audio_path = str(shared_datadir / "test_audio_44100.wav")
    example = {"bytes": None, "path": audio_path}
    storage = pa.array([example], type=pa.struct({"bytes": pa.binary(), "path": pa.string()}))
    embedded_storage = Audio().embed_storage(storage)
    embedded_example = embedded_storage.to_pylist()[0]
    assert embedded_example == {"bytes": open(audio_path, "rb").read(), "path": "test_audio_44100.wav"}
datasets/tests/features/test_audio.py/0
{ "file_path": "datasets/tests/features/test_audio.py", "repo_id": "datasets", "token_count": 11528 }
183
import os import tempfile from unittest import TestCase import numpy as np import pandas as pd import pytest from datasets import load_from_disk from datasets.arrow_dataset import Dataset from datasets.dataset_dict import DatasetDict, IterableDatasetDict from datasets.features import ClassLabel, Features, Sequence, Value from datasets.iterable_dataset import IterableDataset from datasets.splits import NamedSplit from .utils import ( assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_polars, require_tf, require_torch, ) class DatasetDictTest(TestCase): def _create_dummy_dataset(self, multiple_columns=False): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} dset = Dataset.from_dict(data) else: dset = Dataset.from_dict( {"filename": ["my_name-train" + "_" + f"{x:03d}" for x in np.arange(30).tolist()]} ) return dset def _create_dummy_dataset_dict(self, multiple_columns=False) -> DatasetDict: return DatasetDict( { "train": self._create_dummy_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_dataset(multiple_columns=multiple_columns), } ) def _create_dummy_iterable_dataset(self, multiple_columns=False) -> IterableDataset: def gen(): if multiple_columns: data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]} for v1, v2 in zip(data["col_1"], data["col_2"]): yield {"col_1": v1, "col_2": v2} else: for x in range(30): yield {"filename": "my_name-train" + "_" + f"{x:03d}"} return IterableDataset.from_generator(gen) def _create_dummy_iterable_dataset_dict(self, multiple_columns=False) -> IterableDatasetDict: return IterableDatasetDict( { "train": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), "test": self._create_dummy_iterable_dataset(multiple_columns=multiple_columns), } ) def test_flatten(self): dset_split = Dataset.from_dict( {"a": [{"b": {"c": ["text"]}}] * 10, "foo": [1] * 10}, features=Features({"a": {"b": Sequence({"c": Value("string")})}, "foo": Value("int64")}), ) dset = DatasetDict({"train": dset_split, "test": dset_split}) dset = dset.flatten() self.assertDictEqual(dset.column_names, {"train": ["a.b.c", "foo"], "test": ["a.b.c", "foo"]}) self.assertListEqual(sorted(dset["train"].features.keys()), ["a.b.c", "foo"]) self.assertDictEqual( dset["train"].features, Features({"a.b.c": Sequence(Value("string")), "foo": Value("int64")}) ) del dset def test_set_format_numpy(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="numpy", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.reset_format() with dset.formatted_as(type="numpy", columns=["col_1"]): for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], np.int64) self.assertEqual(dset_split[0]["col_1"].item(), 3) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], None) self.assertEqual(dset_split.format["format_kwargs"], {}) self.assertEqual(dset_split.format["columns"], dset_split.column_names) self.assertEqual(dset_split.format["output_all_columns"], False) dset.set_format(type="numpy", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="numpy", columns=["col_1", "col_2"]) for 
dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], np.str_) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset @require_torch def test_set_format_torch(self): import torch dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="torch", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="torch", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="torch") for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_1"], torch.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].item(), 3) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") del dset @require_tf def test_set_format_tf(self): import tensorflow as tf dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="tensorflow", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 1) self.assertIsInstance(dset_split[0]["col_1"], tf.Tensor) self.assertListEqual(list(dset_split[0]["col_1"].shape), []) self.assertEqual(dset_split[0]["col_1"].numpy().item(), 3) dset.set_format(type="tensorflow", columns=["col_1"], output_all_columns=True) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertIsInstance(dset_split[0]["col_2"], str) self.assertEqual(dset_split[0]["col_2"], "a") dset.set_format(type="tensorflow", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0]), 2) self.assertEqual(dset_split[0]["col_2"].numpy().decode("utf-8"), "a") del dset def test_set_format_pandas(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="pandas", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 1) self.assertIsInstance(dset_split[0], pd.DataFrame) self.assertListEqual(list(dset_split[0].shape), [1, 1]) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="pandas", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 2) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset @require_polars def test_set_format_polars(self): import polars as pl dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_format(type="polars", columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 1) self.assertIsInstance(dset_split[0], pl.DataFrame) self.assertEqual(dset_split[0].shape, (1, 1)) self.assertEqual(dset_split[0]["col_1"].item(), 3) dset.set_format(type="polars", columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].columns), 2) self.assertEqual(dset_split[0]["col_2"].item(), "a") del dset def test_set_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = self._create_dummy_dataset_dict(multiple_columns=True) dset.set_transform(transform=transform, 
columns=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.format["type"], "custom") self.assertEqual(len(dset_split[0].keys()), 1) self.assertEqual(dset_split[0]["col_1"], "3") self.assertEqual(dset_split[:2]["col_1"], ["3", "2"]) self.assertEqual(dset_split["col_1"][:2], ["3", "2"]) prev_format = dset[list(dset.keys())[0]].format for dset_split in dset.values(): dset_split.set_format(**dset_split.format) self.assertEqual(prev_format, dset_split.format) dset.set_transform(transform=transform, columns=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(len(dset_split[0].keys()), 2) self.assertEqual(dset_split[0]["col_2"], "A") del dset def test_with_format(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_format("numpy", columns=["col_1"]) dset.set_format("numpy", columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_with_transform(self): def transform(batch): return {k: [str(i).upper() for i in v] for k, v in batch.items()} dset = self._create_dummy_dataset_dict(multiple_columns=True) dset2 = dset.with_transform(transform, columns=["col_1"]) dset.set_transform(transform, columns=["col_1"]) for dset_split, dset_split2 in zip(dset.values(), dset2.values()): self.assertDictEqual(dset_split.format, dset_split2.format) del dset, dset2 def test_cast(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) features = dset["train"].features features["col_1"] = Value("float64") dset = dset.cast(features) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertEqual(dset_split.features["col_1"], Value("float64")) self.assertIsInstance(dset_split[0]["col_1"], float) del dset def test_remove_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.remove_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.remove_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertListEqual(dset_split._format_columns, ["col_2"]) self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_2"]) del dset def test_rename_column(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.rename_column(original_column_name="col_1", new_column_name="new_name") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) self.assertListEqual(list(dset_split.column_names), ["new_name", "col_2"]) del dset def test_select_columns(self): dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=[]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 0) dset = self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names="col_1") for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_1"]) dset = 
self._create_dummy_dataset_dict(multiple_columns=True) dset = dset.select_columns(column_names=["col_1", "col_2"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 2) dset = self._create_dummy_dataset_dict(multiple_columns=True) for dset_split in dset.values(): dset_split._format_columns = ["col_1", "col_2"] dset = dset.select_columns(column_names=["col_1"]) for dset_split in dset.values(): self.assertEqual(dset_split.num_columns, 1) self.assertListEqual(list(dset_split.column_names), ["col_1"]) self.assertListEqual(dset_split._format_columns, ["col_1"]) def test_map(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() mapped_dsets_1: DatasetDict = dsets.map(lambda ex: {"foo": ["bar"] * len(ex["filename"])}, batched=True) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_1.keys())) self.assertListEqual(mapped_dsets_1["train"].column_names, ["filename", "foo"]) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } mapped_dsets_2: DatasetDict = mapped_dsets_1.map( lambda ex: {"bar": ["foo"] * len(ex["filename"])}, batched=True, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(mapped_dsets_2.keys())) self.assertListEqual(sorted(mapped_dsets_2["train"].column_names), sorted(["filename", "foo", "bar"])) del dsets, mapped_dsets_1, mapped_dsets_2 def test_iterable_map(self): dsets = self._create_dummy_iterable_dataset_dict() fn_kwargs = {"n": 3} mapped_dsets: IterableDatasetDict = dsets.map( lambda x, n: {"foo": [n] * len(x["filename"])}, batched=True, fn_kwargs=fn_kwargs, ) mapped_example = next(iter(mapped_dsets["train"])) self.assertListEqual(sorted(mapped_example.keys()), sorted(["filename", "foo"])) self.assertLessEqual(mapped_example["foo"], 3) del dsets, mapped_dsets def test_filter(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() filtered_dsets_1: DatasetDict = dsets.filter(lambda ex: int(ex["filename"].split("_")[-1]) < 10) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_1.keys())) self.assertEqual(len(filtered_dsets_1["train"]), 10) cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } filtered_dsets_2: DatasetDict = filtered_dsets_1.filter( lambda ex: int(ex["filename"].split("_")[-1]) < 5, cache_file_names=cache_file_names ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_2.keys())) self.assertEqual(len(filtered_dsets_2["train"]), 5) filtered_dsets_3: DatasetDict = dsets.filter( lambda examples: [int(ex.split("_")[-1]) < 10 for ex in examples["filename"]], batched=True ) self.assertListEqual(list(dsets.keys()), list(filtered_dsets_3.keys())) self.assertEqual(len(filtered_dsets_3["train"]), 10) del dsets, filtered_dsets_1, filtered_dsets_2, filtered_dsets_3 def test_iterable_filter(self): dsets = self._create_dummy_iterable_dataset_dict() example = next(iter(dsets["train"])) fn_kwargs = {"n": 3} filtered_dsets: IterableDatasetDict = dsets.filter( lambda ex, n: n < int(ex["filename"].split("_")[-1]), fn_kwargs=fn_kwargs ) filtered_example = next(iter(filtered_dsets["train"])) self.assertListEqual(list(example.keys()), list(filtered_example.keys())) self.assertEqual(int(filtered_example["filename"].split("_")[-1]), 4) # id starts from 3 del dsets, filtered_dsets def test_sort(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() sorted_dsets_1: 
DatasetDict = dsets.sort("filename") self.assertListEqual(list(dsets.keys()), list(sorted_dsets_1.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_1["train"]["filename"]], sorted(f"{x:03d}" for x in range(30)), ) indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } sorted_dsets_2: DatasetDict = sorted_dsets_1.sort( "filename", indices_cache_file_names=indices_cache_file_names, reverse=True ) self.assertListEqual(list(dsets.keys()), list(sorted_dsets_2.keys())) self.assertListEqual( [f.split("_")[-1] for f in sorted_dsets_2["train"]["filename"]], sorted((f"{x:03d}" for x in range(30)), reverse=True), ) del dsets, sorted_dsets_1, sorted_dsets_2 def test_shuffle(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } seeds = { "train": 1234, "test": 1234, } dsets_shuffled = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled["test"]["filename"]) self.assertEqual(len(dsets_shuffled["train"]), 30) self.assertEqual(dsets_shuffled["train"][0]["filename"], "my_name-train_028") self.assertEqual(dsets_shuffled["train"][2]["filename"], "my_name-train_010") self.assertDictEqual(dsets["train"].features, Features({"filename": Value("string")})) self.assertDictEqual(dsets_shuffled["train"].features, Features({"filename": Value("string")})) # Reproducibility indices_cache_file_names_2 = { "train": os.path.join(tmp_dir, "train_2.arrow"), "test": os.path.join(tmp_dir, "test_2.arrow"), } dsets_shuffled_2 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_2, load_from_cache_file=False ) self.assertListEqual(dsets_shuffled["train"]["filename"], dsets_shuffled_2["train"]["filename"]) seeds = { "train": 1234, "test": 1, } indices_cache_file_names_3 = { "train": os.path.join(tmp_dir, "train_3.arrow"), "test": os.path.join(tmp_dir, "test_3.arrow"), } dsets_shuffled_3 = dsets.shuffle( seeds=seeds, indices_cache_file_names=indices_cache_file_names_3, load_from_cache_file=False ) self.assertNotEqual(dsets_shuffled_3["train"]["filename"], dsets_shuffled_3["test"]["filename"]) # other input types dsets_shuffled_int = dsets.shuffle(42) dsets_shuffled_alias = dsets.shuffle(seed=42) dsets_shuffled_none = dsets.shuffle() self.assertEqual(len(dsets_shuffled_int["train"]), 30) self.assertEqual(len(dsets_shuffled_alias["train"]), 30) self.assertEqual(len(dsets_shuffled_none["train"]), 30) del dsets, dsets_shuffled, dsets_shuffled_2, dsets_shuffled_3 del dsets_shuffled_int, dsets_shuffled_alias, dsets_shuffled_none def test_flatten_indices(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() indices_cache_file_names = { "train": os.path.join(tmp_dir, "train.arrow"), "test": os.path.join(tmp_dir, "test.arrow"), } dsets_shuffled = dsets.shuffle( seed=42, indices_cache_file_names=indices_cache_file_names, load_from_cache_file=False ) self.assertIsNotNone(dsets_shuffled["train"]._indices) self.assertIsNotNone(dsets_shuffled["test"]._indices) dsets_flat = dsets_shuffled.flatten_indices() self.assertIsNone(dsets_flat["train"]._indices) self.assertIsNone(dsets_flat["test"]._indices) del dsets, dsets_shuffled, dsets_flat def test_check_values_type(self): dsets = 
self._create_dummy_dataset_dict() dsets["bad_split"] = None self.assertRaises(TypeError, dsets.map, lambda x: x) self.assertRaises(TypeError, dsets.filter, lambda x: True) self.assertRaises(TypeError, dsets.shuffle) self.assertRaises(TypeError, dsets.sort, "filename") del dsets def test_serialization(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) del reloaded_dsets del dsets["test"] dsets.save_to_disk(tmp_dir) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) del dsets, reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_shards={"train": 3, "test": 2}) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 3) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir, num_proc=2) reloaded_dsets = DatasetDict.load_from_disk(tmp_dir) self.assertListEqual(sorted(reloaded_dsets), ["test", "train"]) self.assertEqual(len(reloaded_dsets["train"]), 30) self.assertListEqual(reloaded_dsets["train"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["train"].cache_files), 2) self.assertEqual(len(reloaded_dsets["test"]), 30) self.assertListEqual(reloaded_dsets["test"].column_names, ["filename"]) self.assertEqual(len(reloaded_dsets["test"].cache_files), 2) del reloaded_dsets def test_load_from_disk(self): with tempfile.TemporaryDirectory() as tmp_dir: dsets = self._create_dummy_dataset_dict() dsets.save_to_disk(tmp_dir) del dsets dsets = load_from_disk(tmp_dir) self.assertListEqual(sorted(dsets), ["test", "train"]) self.assertEqual(len(dsets["train"]), 30) self.assertListEqual(dsets["train"].column_names, ["filename"]) self.assertEqual(len(dsets["test"]), 30) self.assertListEqual(dsets["test"].column_names, ["filename"]) del dsets def test_align_labels_with_mapping(self): train_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "neutral", "contradiction"]), } ) test_features = Features( { "input_text": Value("string"), "input_labels": ClassLabel(num_classes=3, names=["entailment", "contradiction", "neutral"]), } ) train_data = {"input_text": ["a", "a", "b", "b", "c", "c"], "input_labels": [0, 0, 1, 1, 2, 2]} test_data = {"input_text": ["a", "a", "c", "c", "b", "b"], "input_labels": [0, 0, 1, 1, 2, 2]} label2id = {"CONTRADICTION": 0, "ENTAILMENT": 2, "NEUTRAL": 1} id2label = {v: k for k, v in label2id.items()} train_expected_labels = [2, 2, 1, 1, 0, 0] test_expected_labels = [2, 2, 0, 0, 1, 1] train_expected_label_names = 
[id2label[idx] for idx in train_expected_labels] test_expected_label_names = [id2label[idx] for idx in test_expected_labels] dsets = DatasetDict( { "train": Dataset.from_dict(train_data, features=train_features), "test": Dataset.from_dict(test_data, features=test_features), } ) dsets = dsets.align_labels_with_mapping(label2id, "input_labels") self.assertListEqual(train_expected_labels, dsets["train"]["input_labels"]) self.assertListEqual(test_expected_labels, dsets["test"]["input_labels"]) train_aligned_label_names = [ dsets["train"].features["input_labels"].int2str(idx) for idx in dsets["train"]["input_labels"] ] test_aligned_label_names = [ dsets["test"].features["input_labels"].int2str(idx) for idx in dsets["test"]["input_labels"] ] self.assertListEqual(train_expected_label_names, train_aligned_label_names) self.assertListEqual(test_expected_label_names, test_aligned_label_names) def test_dummy_datasetdict_serialize_fs(mockfs): dataset_dict = DatasetDict( { "train": Dataset.from_dict({"a": range(30)}), "test": Dataset.from_dict({"a": range(10)}), } ) dataset_path = "mock://my_dataset" dataset_dict.save_to_disk(dataset_path, storage_options=mockfs.storage_options) assert mockfs.isdir(dataset_path) assert mockfs.glob(dataset_path + "/*") reloaded = dataset_dict.load_from_disk(dataset_path, storage_options=mockfs.storage_options) assert list(reloaded) == list(dataset_dict) for k in dataset_dict: assert reloaded[k].features == dataset_dict[k].features assert reloaded[k].to_dict() == dataset_dict[k].to_dict() def _check_csv_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_csv_keep_in_memory(keep_in_memory, csv_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_csv({"train": csv_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_csv_features(features, csv_path, tmp_path): cache_dir = tmp_path / "cache" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" default_expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_csv({"train": csv_path}, features=features, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_csv_split(split, csv_path, tmp_path): if split: path = {split: csv_path} else: split = 
"train" path = {"train": csv_path, "test": csv_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "int64", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_csv(path, cache_dir=cache_dir) _check_csv_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_json({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_json_features(features, jsonl_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_json({"train": jsonl_path}, features=features, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path): if split: path = {split: jsonl_path} else: split = "train" path = {"train": jsonl_path, "test": jsonl_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_json(path, cache_dir=cache_dir) _check_json_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_parquet({"train": parquet_path}, cache_dir=cache_dir, 
keep_in_memory=keep_in_memory) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ], ) def test_datasetdict_from_parquet_features(features, parquet_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_parquet({"train": parquet_path}, features=features, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_parquet_split(split, parquet_path, tmp_path): if split: path = {split: parquet_path} else: split = "train" path = {"train": parquet_path, "test": parquet_path} cache_dir = tmp_path / "cache" expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"} dataset = DatasetDict.from_parquet(path, cache_dir=cache_dir) _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)): assert isinstance(dataset_dict, DatasetDict) for split in splits: dataset = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory", [False, True]) def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path): cache_dir = tmp_path / "cache" expected_features = {"text": "string"} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): dataset = DatasetDict.from_text({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize( "features", [ None, {"text": "string"}, {"text": "int32"}, {"text": "float32"}, ], ) def test_datasetdict_from_text_features(features, text_path, tmp_path): cache_dir = tmp_path / "cache" default_expected_features = {"text": "string"} expected_features = features.copy() if features else default_expected_features features = ( Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None ) dataset = DatasetDict.from_text({"train": text_path}, features=features, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features) @pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"]) def test_datasetdict_from_text_split(split, text_path, tmp_path): if split: path = {split: text_path} else: split = "train" path = {"train": text_path, "test": text_path} cache_dir = tmp_path / "cache" expected_features = {"text": "string"} dataset = DatasetDict.from_text(path, cache_dir=cache_dir) _check_text_datasetdict(dataset, expected_features, splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys())
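# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream test suite: the tests above
# exercise DatasetDict.from_csv / from_json / from_parquet / from_text with a
# {split_name: path} mapping, plus the save_to_disk / load_from_disk
# round-trip. A minimal usage example of that pattern could look as follows;
# "train.csv", "test.csv" and "my_dataset_dir" are hypothetical paths used
# only for illustration.
from datasets import DatasetDict

dsets = DatasetDict.from_csv({"train": "train.csv", "test": "test.csv"})
# Each value is a regular Dataset whose .split matches its key, as asserted
# in test_datasetdict_from_csv_split above.
print(dsets["train"].column_names)

# Saving and reloading preserves features and rows, as checked in
# test_dummy_datasetdict_serialize_fs above.
dsets.save_to_disk("my_dataset_dir")
reloaded = DatasetDict.load_from_disk("my_dataset_dir")
assert reloaded["train"].features == dsets["train"].features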
datasets/tests/test_dataset_dict.py/0
{ "file_path": "datasets/tests/test_dataset_dict.py", "repo_id": "datasets", "token_count": 17807 }
184
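# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the upstream test suite: the next file
# (tests/test_iterable_dataset.py) tests IterableDataset and its lazy
# ExamplesIterable building blocks. A minimal usage example of the public API
# exercised there (from_generator, map, buffer-based shuffle) might look like
# this; the generator and column names below are made up for illustration.
from datasets import IterableDataset


def gen():
    for i in range(100):
        yield {"id": i}


ds = IterableDataset.from_generator(gen)
# map() is lazy: the function is applied on the fly while iterating.
ds = ds.map(lambda x: {"id_plus_one": x["id"] + 1})
# shuffle() is approximate: it fills a fixed-size buffer and samples from it
# with a seeded numpy generator, as exercised by the
# BufferShuffledExamplesIterable tests below.
ds = ds.shuffle(seed=42, buffer_size=10)
first_example = next(iter(ds))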
import pickle from copy import deepcopy from itertools import chain, islice import numpy as np import pandas as pd import pyarrow as pa import pyarrow.compute as pc import pytest from datasets import Dataset, load_dataset from datasets.combine import concatenate_datasets, interleave_datasets from datasets.features import ( ClassLabel, Features, Image, Value, ) from datasets.formatting import get_format_type_from_alias from datasets.info import DatasetInfo from datasets.iterable_dataset import ( ArrowExamplesIterable, BufferShuffledExamplesIterable, CyclingMultiSourcesExamplesIterable, ExamplesIterable, FilteredExamplesIterable, FormattingConfig, HorizontallyConcatenatedMultiSourcesExamplesIterable, IterableDataset, MappedExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable, SelectColumnsIterable, ShuffledDataSourcesArrowExamplesIterable, ShuffledDataSourcesExamplesIterable, ShufflingConfig, SkipExamplesIterable, StepExamplesIterable, TakeExamplesIterable, TypedExamplesIterable, VerticallyConcatenatedMultiSourcesExamplesIterable, _BaseExamplesIterable, _batch_arrow_tables, _batch_to_examples, _convert_to_arrow, _examples_to_batch, ) from .utils import ( assert_arrow_memory_doesnt_increase, is_rng_equal, require_dill_gt_0_3_2, require_not_windows, require_pyspark, require_tf, require_torch, ) DEFAULT_N_EXAMPLES = 20 DEFAULT_BATCH_SIZE = 4 DEFAULT_FILEPATH = "file.txt" SAMPLE_DATASET_IDENTIFIER = "hf-internal-testing/dataset_with_script" # has dataset script def generate_examples_fn(**kwargs): kwargs = kwargs.copy() n = kwargs.pop("n", DEFAULT_N_EXAMPLES) filepaths = kwargs.pop("filepaths", None) for filepath in filepaths or [DEFAULT_FILEPATH]: if filepaths is not None: kwargs["filepath"] = filepath for i in range(n): yield f"{filepath}_{i}", {"id": i, **kwargs} def generate_tables_fn(**kwargs): kwargs = kwargs.copy() n = kwargs.pop("n", DEFAULT_N_EXAMPLES) batch_size = kwargs.pop("batch_size", DEFAULT_BATCH_SIZE) filepaths = kwargs.pop("filepaths", None) for filepath in filepaths or [DEFAULT_FILEPATH]: buffer = [] batch_idx = 0 if filepaths is not None: kwargs["filepath"] = filepath for i in range(n): buffer.append({"id": i, **kwargs}) if len(buffer) == batch_size: yield f"{filepath}_{batch_idx}", pa.Table.from_pylist(buffer) buffer = [] batch_idx += 1 yield batch_idx, pa.Table.from_pylist(buffer) @pytest.fixture def dataset(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") @pytest.fixture def dataset_with_several_columns(): ex_iterable = ExamplesIterable( generate_examples_fn, {"filepath": ["data0.txt", "data1.txt", "data2.txt"], "metadata": {"sources": ["https://foo.bar"]}}, ) return IterableDataset(ex_iterable, info=DatasetInfo(description="dummy"), split="train") @pytest.fixture def arrow_file(tmp_path_factory, dataset: IterableDataset): filename = str(tmp_path_factory.mktemp("data") / "file.arrow") Dataset.from_generator(dataset.__iter__).map(cache_file_name=filename) return filename ################################ # # Utilities tests # ################################ @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_convert_to_arrow(batch_size, drop_last_batch): examples = [{"foo": i} for i in range(10)] full_table = pa.Table.from_pylist(examples) num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if 
num_rows % batch_size else num_rows // batch_size subtables = list( _convert_to_arrow( list(enumerate(examples)), batch_size=batch_size, drop_last_batch=drop_last_batch, ) ) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for _, subtable in subtables) else: assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) assert len(subtables[-1][1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables([subtable for _, subtable in subtables]) assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() @pytest.mark.parametrize( "tables", [ [pa.table({"foo": range(10)})], [pa.table({"foo": range(0, 5)}), pa.table({"foo": range(5, 10)})], [pa.table({"foo": [i]}) for i in range(10)], ], ) @pytest.mark.parametrize("batch_size", [1, 2, 3, 9, 10, 11, 20]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_batch_arrow_tables(tables, batch_size, drop_last_batch): full_table = pa.concat_tables(tables) num_rows = len(full_table) if not drop_last_batch else len(full_table) // batch_size * batch_size num_batches = (num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size subtables = list( _batch_arrow_tables(list(enumerate(tables)), batch_size=batch_size, drop_last_batch=drop_last_batch) ) assert len(subtables) == num_batches if drop_last_batch: assert all(len(subtable) == batch_size for _, subtable in subtables) else: assert all(len(subtable) == batch_size for _, subtable in subtables[:-1]) assert len(subtables[-1][1]) <= batch_size if num_rows > 0: reloaded = pa.concat_tables([subtable for _, subtable in subtables]) assert full_table.slice(0, num_rows).to_pydict() == reloaded.to_pydict() ################################ # # _BaseExampleIterable tests # ################################ def test_examples_iterable(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) expected = list(generate_examples_fn()) assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert ex_iterable.iter_arrow is None def test_examples_iterable_with_kwargs(): ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) expected = list(generate_examples_fn(filepaths=["0.txt", "1.txt"], split="train")) assert list(ex_iterable) == expected assert all("split" in ex for _, ex in ex_iterable) assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"] def test_examples_iterable_shuffle_data_sources(): ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]}) ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) expected = list(generate_examples_fn(filepaths=["1.txt", "0.txt"])) # shuffle the filepaths assert list(ex_iterable) == expected def test_examples_iterable_shuffle_shards_and_metadata(): def gen(filepaths, all_metadata): for i, (filepath, metadata) in enumerate(zip(filepaths, all_metadata)): yield i, {"filepath": filepath, "metadata": metadata} ex_iterable = ExamplesIterable( gen, { "filepaths": [f"{i}.txt" for i in range(100)], "all_metadata": [{"id": str(i)} for i in range(100)], }, ) ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(42)) out = list(ex_iterable) filepaths_ids = [x["filepath"].split(".")[0] for _, x in out] metadata_ids = [x["metadata"]["id"] for _, x in out] assert filepaths_ids == metadata_ids, "entangled lists of shards/metadata should be shuffled the same way" def test_arrow_examples_iterable(): ex_iterable = 
ArrowExamplesIterable(generate_tables_fn, {}) expected = sum([pa_table.to_pylist() for _, pa_table in generate_tables_fn()], []) assert next(iter(ex_iterable))[1] == expected[0] assert [example for _, example in ex_iterable] == expected expected = list(generate_tables_fn()) assert list(ex_iterable.iter_arrow()) == expected def test_arrow_examples_iterable_with_kwargs(): ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"], "split": "train"}) expected = sum( [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")], [] ) assert [example for _, example in ex_iterable] == expected assert all("split" in ex for _, ex in ex_iterable) assert sorted({ex["filepath"] for _, ex in ex_iterable}) == ["0.txt", "1.txt"] expected = list(generate_tables_fn(filepaths=["0.txt", "1.txt"], split="train")) assert list(ex_iterable.iter_arrow()) == expected def test_arrow_examples_iterable_shuffle_data_sources(): ex_iterable = ArrowExamplesIterable(generate_tables_fn, {"filepaths": ["0.txt", "1.txt"]}) ex_iterable = ex_iterable.shuffle_data_sources(np.random.default_rng(40)) expected = sum( [pa_table.to_pylist() for _, pa_table in generate_tables_fn(filepaths=["1.txt", "0.txt"])], [] ) # shuffle the filepaths assert [example for _, example in ex_iterable] == expected expected = list(generate_tables_fn(filepaths=["1.txt", "0.txt"])) assert list(ex_iterable.iter_arrow()) == expected @pytest.mark.parametrize("seed", [42, 1337, 101010, 123456]) def test_buffer_shuffled_examples_iterable(seed): n, buffer_size = 100, 30 generator = np.random.default_rng(seed) base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = BufferShuffledExamplesIterable(base_ex_iterable, buffer_size=buffer_size, generator=generator) rng = deepcopy(generator) expected_indices_used_for_shuffling = list( islice(BufferShuffledExamplesIterable._iter_random_indices(rng, buffer_size=buffer_size), n - buffer_size) ) # indices to pick in the shuffle buffer should all be in the right range assert all(0 <= index_to_pick < buffer_size for index_to_pick in expected_indices_used_for_shuffling) # it should be random indices assert expected_indices_used_for_shuffling != list(range(buffer_size)) # The final order of examples is the result of a shuffle buffer. all_examples = list(generate_examples_fn(n=n)) # We create a buffer and we pick random examples from it. buffer, rest = all_examples[:buffer_size], all_examples[buffer_size:] expected = [] for i, index_to_pick in enumerate(expected_indices_used_for_shuffling): expected.append(buffer[index_to_pick]) # The picked examples are directly replaced by the next examples from the iterable. buffer[index_to_pick] = rest.pop(0) # Once we have reached the end of the iterable, we shuffle the buffer and return the remaining examples. 
rng.shuffle(buffer) expected += buffer assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert sorted(ex_iterable) == sorted(all_examples) def test_cycling_multi_sources_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) ex_iterable = CyclingMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = list(chain(*zip(generate_examples_fn(text="foo"), generate_examples_fn(text="bar")))) # The cycling stops as soon as one iterable is out of examples (here ex_iterable1), so the last sample from ex_iterable2 is unnecessary expected = expected[:-1] assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected assert all((x["id"], x["text"]) == (i // 2, "bar" if i % 2 else "foo") for i, (_, x) in enumerate(ex_iterable)) @pytest.mark.parametrize("probabilities", [None, (0.5, 0.5), (0.9, 0.1)]) def test_randomly_cycling_multi_sources_examples_iterable(probabilities): seed = 42 generator = np.random.default_rng(seed) ex_iterable1 = ExamplesIterable(generate_examples_fn, {"text": "foo"}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"text": "bar"}) ex_iterable = RandomlyCyclingMultiSourcesExamplesIterable( [ex_iterable1, ex_iterable2], generator=generator, probabilities=probabilities ) # The source used randomly changes at each example. It stops when one of the iterators is empty. rng = deepcopy(generator) iterators = (generate_examples_fn(text="foo"), generate_examples_fn(text="bar")) indices_iterator = RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices( rng, len(iterators), p=probabilities ) expected = [] lengths = [len(list(ex_iterable1)), len(list(ex_iterable2))] for i in indices_iterator: if lengths[0] == 0 or lengths[1] == 0: break for key, example in iterators[i]: expected.append((key, example)) lengths[i] -= 1 break else: break assert next(iter(ex_iterable)) == expected[0] assert list(ex_iterable) == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [{**x, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None), # just add 1 to the id (3, lambda x: {"id+1": [x["id"][0] + 1]}, True, 1), # same with bs=1 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10), # same with bs=10 (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1), # same with bs<=0 (3, lambda x: {k: v * 2 for k, v in x.items()}, True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_drop_last_batch(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] is_empty = False if batched is False: # `drop_last_batch` has no effect here expected = [{**x, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] if len(examples) < batch_size: # ignore last batch break batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size] if all_examples: expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) else: is_empty = True if not is_empty: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected else: with pytest.raises(StopIteration): next(iter(ex_iterable)) @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x, index: {"id+idx": x["id"] + index}, False, None), # add the index to the id ( 25, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, 10, ), # add the index to the id (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, None), # same with bs=None (5, lambda x, indices: {"id+idx": [i + j for i, j in zip(x["id"], indices)]}, True, -1), # same with bs<=0 ], ) def test_mapped_examples_iterable_with_indices(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [{**x, **func(x, idx)} for idx, x in enumerate(all_examples)] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) indices = list(range(batch_offset, batch_offset + len(examples))) transformed_batch = func(batch, indices) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, remove_columns", [ (3, lambda x: {"id+1": x["id"] + 1}, False, None, ["extra_column"]), # just add 1 to the id (25, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, 10, ["extra_column"]), # same with bs=10 ( 50, lambda x: {"foo": ["bar"] * np.random.default_rng(x["id"][0]).integers(0, 10)}, True, 8, ["extra_column", "id"], ), # make a duplicate of each example (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, None, ["extra_column"]), # same with bs=None (5, lambda x: {"id+1": [i + 1 for i in x["id"]]}, True, -1, ["extra_column"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_remove_columns(n, func, batched, batch_size, remove_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, remove_columns=remove_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns] if batched is False: expected = [{**{k: v for k, v in x.items() if k not in columns_to_remove}, **func(x)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = {k: v for k, v in _examples_to_batch(all_examples).items() if k not in columns_to_remove} expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, fn_kwargs", [ (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, None), (3, lambda x, y=0: {"id+y": x["id"] + y}, False, None, {"y": 3}), (25, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, 10, {"y": 3}), (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, None, {"y": 3}), # same with bs=None (5, lambda x, y=0: {"id+y": [i + y for i in x["id"]]}, True, -1, {"y": 3}), # same with bs<=0 ], ) def test_mapped_examples_iterable_fn_kwargs(n, func, batched, batch_size, fn_kwargs): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs ) all_examples = [x for _, x in generate_examples_fn(n=n)] if fn_kwargs is None: fn_kwargs = {} if batched is False: expected = [{**x, **func(x, **fn_kwargs)} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(batch, **fn_kwargs) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: {"id+1": id_ + 1}, False, None, ["id"]), # just add 1 to the id (25, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, 10, ["id"]), # same with bs=10 (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, None, ["id"]), # same with bs=None (5, lambda ids_: {"id+1": [i + 1 for i in ids_]}, True, -1, ["id"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_input_columns(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = [{**x, **func(*[x[col] for col in columns_to_input])} for x in all_examples] else: # For batched map we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) transformed_batch = func(*[batch[col] for col in columns_to_input]) all_transformed_examples.extend(_batch_to_examples(transformed_batch)) expected = _examples_to_batch(all_examples) expected.update(_examples_to_batch(all_transformed_examples)) expected = list(_batch_to_examples(expected)) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None), # just add 1 to the id (3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 1), # same with bs=1 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10), # same with bs=10 (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1), # same with bs<=0 (3, lambda t: pa.concat_tables([t] * 2), True, 1), # make a duplicate of each example ], ) def test_mapped_examples_iterable_drop_last_batch_and_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, drop_last_batch=True, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] is_empty = False if batched is False: # 
`drop_last_batch` has no effect here expected = [func(pa.Table.from_pylist([x])).to_pylist()[0] for x in all_examples] else: all_transformed_examples = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] if len(examples) < batch_size: # ignore last batch break batch = pa.Table.from_pylist(examples) out = func(batch) all_transformed_examples.extend( out.to_pylist() ) # we don't merge with input since they're arrow tables and not dictionaries all_examples = all_examples if n % batch_size == 0 else all_examples[: n // batch_size * batch_size] if all_examples: expected = all_transformed_examples else: is_empty = True if not is_empty: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected else: with pytest.raises(StopIteration): next(iter(ex_iterable)) @pytest.mark.parametrize( "n, func, batched, batch_size", [ ( 3, lambda t, index: t.append_column("id+idx", pc.add(t["id"], index)), False, None, ), # add the index to the id ( 25, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, 10, ), # add the index to the id (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, None), # same with bs=None (5, lambda t, indices: t.append_column("id+idx", pc.add(t["id"], indices)), True, -1), # same with bs<=0 ], ) def test_mapped_examples_iterable_with_indices_and_arrow_format(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [func(pa.Table.from_pylist([x]), i).to_pylist()[0] for i, x in enumerate(all_examples)] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch, list(range(batch_offset, batch_offset + len(batch)))).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, remove_columns", [ ( 3, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), False, None, ["extra_column"], ), # just add 1 to the id (25, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, 10, ["extra_column"]), # same with bs=10 ( 50, lambda t: pa.table({"foo": ["bar"] * np.random.default_rng(t["id"][0].as_py()).integers(0, 10)}), True, 8, ["extra_column", "id"], ), # make a duplicate of each example (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, None, ["extra_column"]), # same with bs=None (5, lambda t: t.append_column("id+1", pc.add(t["id"], 1)), True, -1, ["extra_column"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_remove_columns_arrow_format(n, func, batched, batch_size, remove_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "extra_column": "foo"}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, 
batch_size=batch_size, remove_columns=remove_columns, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_remove = remove_columns if isinstance(remove_columns, list) else [remove_columns] if batched is False: expected = [ {**{k: v for k, v in func(pa.Table.from_pylist([x])).to_pylist()[0].items() if k not in columns_to_remove}} for x in all_examples ] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend( [{k: v for k, v in x.items() if k not in columns_to_remove} for x in func(batch).to_pylist()] ) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, fn_kwargs", [ (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, None), (3, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), False, None, {"y": 3}), (25, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, 10, {"y": 3}), (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, None, {"y": 3}), # same with bs=None (5, lambda t, y=0: t.append_column("id+idx", pc.add(t["id"], y)), True, -1, {"y": 3}), # same with bs<=0 ], ) def test_mapped_examples_iterable_fn_kwargs_and_arrow_format(n, func, batched, batch_size, fn_kwargs): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, fn_kwargs=fn_kwargs, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] if fn_kwargs is None: fn_kwargs = {} if batched is False: expected = [func(pa.Table.from_pylist([x]), **fn_kwargs).to_pylist()[0] for x in all_examples] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(batch, **fn_kwargs).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: pa.table({"id+1": pc.add(id_, 1)}), False, None, ["id"]), # just add 1 to the id (25, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, 10, ["id"]), # same with bs=10 (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, None, ["id"]), # same with bs=None (5, lambda ids_: pa.table({"id+1": pc.add(ids_, 1)}), True, -1, ["id"]), # same with bs<=0 ], ) def test_mapped_examples_iterable_input_columns_and_arrow_format(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = MappedExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns, formatting=FormattingConfig(format_type="arrow"), ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = 
[ func(*[pa.Table.from_pylist([x])[col] for col in columns_to_input]).to_pylist()[0] for x in all_examples ] else: expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = pa.Table.from_pylist(examples) expected.extend(func(*[batch[col] for col in columns_to_input]).to_pylist()) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x: x["id"] % 2 == 0, False, None), # keep even number (3, lambda x: [x["id"][0] % 2 == 0], True, 1), # same with bs=1 (25, lambda x: [i % 2 == 0 for i in x["id"]], True, 10), # same with bs=10 (5, lambda x: [i % 2 == 0 for i in x["id"]], True, None), # same with bs=None (5, lambda x: [i % 2 == 0 for i in x["id"]], True, -1), # same with bs<=0 (3, lambda x: False, False, None), # return 0 examples (3, lambda x: [False] * len(x["id"]), True, 10), # same with bs=10 ], ) def test_filtered_examples_iterable(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable(base_ex_iterable, func, batched=batched, batch_size=batch_size) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [x for x in all_examples if func(x)] else: # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) mask = func(batch) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) if expected: assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size", [ (3, lambda x, index: index % 2 == 0, False, None), # keep even number (25, lambda x, indices: [idx % 2 == 0 for idx in indices], True, 10), # same with bs=10 (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, None), # same with bs=None (5, lambda x, indices: [idx % 2 == 0 for idx in indices], True, -1), # same with bs<=0 ], ) def test_filtered_examples_iterable_with_indices(n, func, batched, batch_size): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, with_indices=True ) all_examples = [x for _, x in generate_examples_fn(n=n)] if batched is False: expected = [x for idx, x in enumerate(all_examples) if func(x, idx)] else: # For batched filter we have to format the examples as a batch (i.e. 
in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) indices = list(range(batch_offset, batch_offset + len(examples))) mask = func(batch, indices) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected @pytest.mark.parametrize( "n, func, batched, batch_size, input_columns", [ (3, lambda id_: id_ % 2 == 0, False, None, ["id"]), # keep even number (25, lambda ids_: [i % 2 == 0 for i in ids_], True, 10, ["id"]), # same with bs=10 (3, lambda ids_: [i % 2 == 0 for i in ids_], True, None, ["id"]), # same with bs=None (3, lambda ids_: [i % 2 == 0 for i in ids_], True, -1, ["id"]), # same with bs<=0 ], ) def test_filtered_examples_iterable_input_columns(n, func, batched, batch_size, input_columns): base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n}) ex_iterable = FilteredExamplesIterable( base_ex_iterable, func, batched=batched, batch_size=batch_size, input_columns=input_columns ) all_examples = [x for _, x in generate_examples_fn(n=n)] columns_to_input = input_columns if isinstance(input_columns, list) else [input_columns] if batched is False: expected = [x for x in all_examples if func(*[x[col] for col in columns_to_input])] else: # For batched filter we have to format the examples as a batch (i.e. in one single dictionary) to pass the batch to the function expected = [] # If batch_size is None or <=0, we use the whole dataset as a single batch if batch_size is None or batch_size <= 0: batch_size = len(all_examples) for batch_offset in range(0, len(all_examples), batch_size): examples = all_examples[batch_offset : batch_offset + batch_size] batch = _examples_to_batch(examples) mask = func(*[batch[col] for col in columns_to_input]) expected.extend([x for x, to_keep in zip(examples, mask) if to_keep]) assert next(iter(ex_iterable))[1] == expected[0] assert [x for _, x in ex_iterable] == expected def test_skip_examples_iterable(): total, count = 10, 2 base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total}) skip_ex_iterable = SkipExamplesIterable(base_ex_iterable, n=count) expected = list(generate_examples_fn(n=total))[count:] assert list(skip_ex_iterable) == expected assert ( skip_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is skip_ex_iterable ), "skip examples makes the shards order fixed" def test_take_examples_iterable(): total, count = 10, 2 base_ex_iterable = ExamplesIterable(generate_examples_fn, {"n": total}) take_ex_iterable = TakeExamplesIterable(base_ex_iterable, n=count) expected = list(generate_examples_fn(n=total))[:count] assert list(take_ex_iterable) == expected assert ( take_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is take_ex_iterable ), "take examples makes the shards order fixed" def test_vertically_concatenated_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2] assert [x for _, x in 
concatenated_ex_iterable] == expected def test_vertically_concatenated_examples_iterable_with_different_columns(): # having different columns is supported # Though iterable datasets fill the missing data with nulls ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [x for _, x in ex_iterable1] + [x for _, x in ex_iterable2] assert [x for _, x in concatenated_ex_iterable] == expected def test_vertically_concatenated_examples_iterable_shuffle_data_sources(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) concatenated_ex_iterable = VerticallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) rng = np.random.default_rng(42) shuffled_ex_iterable = concatenated_ex_iterable.shuffle_data_sources(rng) # make sure the list of examples iterables is shuffled, and each examples iterable is shuffled expected = [x for _, x in ex_iterable2.shuffle_data_sources(rng)] + [ x for _, x in ex_iterable1.shuffle_data_sources(rng) ] assert [x for _, x in shuffled_ex_iterable] == expected def test_horizontally_concatenated_examples_iterable(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) with pytest.raises(ValueError): # column "id" is duplicated -> raise an error list(concatenated_ex_iterable) ex_iterable2 = MappedExamplesIterable(ex_iterable2, lambda x: x, remove_columns=["id"]) concatenated_ex_iterable = HorizontallyConcatenatedMultiSourcesExamplesIterable([ex_iterable1, ex_iterable2]) expected = [{**x, **y} for (_, x), (_, y) in zip(ex_iterable1, ex_iterable2)] assert [x for _, x in concatenated_ex_iterable] == expected assert ( concatenated_ex_iterable.shuffle_data_sources(np.random.default_rng(42)) is concatenated_ex_iterable ), "horizontally concatenated examples makes the shards order fixed" @pytest.mark.parametrize( "ex_iterable", [ ExamplesIterable(generate_examples_fn, {}), ShuffledDataSourcesExamplesIterable(generate_examples_fn, {}, np.random.default_rng(42)), SelectColumnsIterable(ExamplesIterable(generate_examples_fn, {}), ["id"]), StepExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 2, 0), CyclingMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), VerticallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), HorizontallyConcatenatedMultiSourcesExamplesIterable([ExamplesIterable(generate_examples_fn, {})]), RandomlyCyclingMultiSourcesExamplesIterable( [ExamplesIterable(generate_examples_fn, {})], np.random.default_rng(42) ), MappedExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: x), MappedExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: x), FilteredExamplesIterable(ExamplesIterable(generate_examples_fn, {}), lambda x: True), FilteredExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), lambda x: True), BufferShuffledExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10, np.random.default_rng(42)), SkipExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), TakeExamplesIterable(ExamplesIterable(generate_examples_fn, {}), 10), TypedExamplesIterable( 
ExamplesIterable(generate_examples_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} ), ], ) def test_no_iter_arrow(ex_iterable: _BaseExamplesIterable): assert ex_iterable.iter_arrow is None @pytest.mark.parametrize( "ex_iterable", [ ArrowExamplesIterable(generate_tables_fn, {}), ShuffledDataSourcesArrowExamplesIterable(generate_tables_fn, {}, np.random.default_rng(42)), SelectColumnsIterable(ArrowExamplesIterable(generate_tables_fn, {}), ["id"]), # StepExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 2, 0), # not implemented # CyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented VerticallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # HorizontallyConcatenatedMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})]), # not implemented # RandomlyCyclingMultiSourcesExamplesIterable([ArrowExamplesIterable(generate_tables_fn, {})], np.random.default_rng(42)), # not implemented MappedExamplesIterable( ExamplesIterable(generate_examples_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow") ), MappedExamplesIterable( ArrowExamplesIterable(generate_tables_fn, {}), lambda t: t, formatting=FormattingConfig(format_type="arrow"), ), FilteredExamplesIterable( ExamplesIterable(generate_examples_fn, {}), lambda t: True, formatting=FormattingConfig(format_type="arrow"), ), FilteredExamplesIterable( ArrowExamplesIterable(generate_tables_fn, {}), lambda t: True, formatting=FormattingConfig(format_type="arrow"), ), # BufferShuffledExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10, np.random.default_rng(42)), # not implemented # SkipExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented # TakeExamplesIterable(ArrowExamplesIterable(generate_tables_fn, {}), 10), # not implemented TypedExamplesIterable( ArrowExamplesIterable(generate_tables_fn, {}), Features({"id": Value("int32")}), token_per_repo_id={} ), ], ) def test_iter_arrow(ex_iterable: _BaseExamplesIterable): assert ex_iterable.iter_arrow is not None key, pa_table = next(ex_iterable.iter_arrow()) assert isinstance(pa_table, pa.Table) ############################ # # IterableDataset tests # ############################ def test_iterable_dataset(): dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {})) expected = [x for _, x in generate_examples_fn()] assert next(iter(dataset)) == expected[0] assert list(dataset) == expected def test_iterable_dataset_from_generator(): data = [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] def gen(): yield from data dataset = IterableDataset.from_generator(gen) assert isinstance(dataset, IterableDataset) assert list(dataset) == data def test_iterable_dataset_from_generator_with_shards(): def gen(shard_names): for shard_name in shard_names: for i in range(10): yield {"shard_name": shard_name, "i": i} shard_names = [f"data{shard_idx}.txt" for shard_idx in range(4)] dataset = IterableDataset.from_generator(gen, gen_kwargs={"shard_names": shard_names}) assert isinstance(dataset, IterableDataset) assert dataset.n_shards == len(shard_names) def test_iterable_dataset_from_file(dataset: IterableDataset, arrow_file: str): with assert_arrow_memory_doesnt_increase(): dataset_from_file = IterableDataset.from_file(arrow_file) expected_features = dataset._resolve_features().features assert 
dataset_from_file.features.type == expected_features.type assert dataset_from_file.features == expected_features assert isinstance(dataset_from_file, IterableDataset) assert list(dataset_from_file) == list(dataset) @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_streaming(): import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [ ("0", 0, 0.0), ("1", 1, 1.0), ("2", 2, 2.0), ("3", 3, 3.0), ] df = spark.createDataFrame(data, "col_1: string, col_2: int, col_3: float") dataset = IterableDataset.from_spark(df) assert isinstance(dataset, IterableDataset) results = [] for ex in dataset: results.append(ex) assert results == [ {"col_1": "0", "col_2": 0, "col_3": 0.0}, {"col_1": "1", "col_2": 1, "col_3": 1.0}, {"col_1": "2", "col_2": 2, "col_3": 2.0}, {"col_1": "3", "col_2": 3, "col_3": 3.0}, ] @require_not_windows @require_dill_gt_0_3_2 @require_pyspark def test_from_spark_streaming_features(): import PIL.Image import pyspark spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate() data = [(0, np.arange(4 * 4 * 3).reshape(4, 4, 3).tolist())] df = spark.createDataFrame(data, "idx: int, image: array<array<array<int>>>") features = Features({"idx": Value("int64"), "image": Image()}) dataset = IterableDataset.from_spark( df, features=features, ) assert isinstance(dataset, IterableDataset) results = [] for ex in dataset: results.append(ex) assert len(results) == 1 assert isinstance(results[0]["image"], PIL.Image.Image) @require_torch def test_iterable_dataset_torch_integration(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) import torch.utils.data assert isinstance(dataset, torch.utils.data.IterableDataset) assert isinstance(dataset, IterableDataset) assert dataset._ex_iterable is ex_iterable @require_torch def test_iterable_dataset_torch_picklable(): import pickle ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable, formatting=FormattingConfig(format_type="torch")) reloaded_dataset = pickle.loads(pickle.dumps(dataset)) import torch.utils.data assert isinstance(reloaded_dataset, IterableDataset) assert isinstance(reloaded_dataset, torch.utils.data.IterableDataset) assert reloaded_dataset._formatting.format_type == "torch" assert len(list(dataset)) == len(list(reloaded_dataset)) @require_torch def test_iterable_dataset_with_format_torch(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) from torch.utils.data import DataLoader dataloader = DataLoader(dataset) assert len(list(dataloader)) == len(list(ex_iterable)) @require_torch def test_iterable_dataset_torch_dataloader_parallel(): from torch.utils.data import DataLoader ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) dataloader = DataLoader(dataset, num_workers=2, batch_size=None) result = list(dataloader) expected = [example for _, example in ex_iterable] assert len(result) == len(expected) assert {str(x) for x in result} == {str(x) for x in expected} @require_torch @pytest.mark.filterwarnings("ignore:This DataLoader will create:UserWarning") @pytest.mark.parametrize("n_shards, num_workers", [(2, 1), (2, 2), (3, 2), (2, 3)]) def test_sharded_iterable_dataset_torch_dataloader_parallel(n_shards, num_workers): from torch.utils.data import DataLoader ex_iterable = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}.txt" for i in 
range(n_shards)]}) dataset = IterableDataset(ex_iterable) dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers) result = list(dataloader) expected = [example for _, example in ex_iterable] assert len(result) == len(expected) assert {str(x) for x in result} == {str(x) for x in expected} @require_torch @pytest.mark.integration @pytest.mark.parametrize("num_workers", [1, 2]) def test_iterable_dataset_from_hub_torch_dataloader_parallel(num_workers, tmp_path): from torch.utils.data import DataLoader dataset = load_dataset(SAMPLE_DATASET_IDENTIFIER, cache_dir=str(tmp_path), streaming=True, split="train") dataloader = DataLoader(dataset, batch_size=None, num_workers=num_workers) result = list(dataloader) assert len(result) == 2 @pytest.mark.parametrize("batch_size", [4, 5]) @pytest.mark.parametrize("drop_last_batch", [False, True]) def test_iterable_dataset_iter_batch(batch_size, drop_last_batch): n = 25 dataset = IterableDataset(ExamplesIterable(generate_examples_fn, {"n": n})) all_examples = [ex for _, ex in generate_examples_fn(n=n)] expected = [] for i in range(0, len(all_examples), batch_size): if len(all_examples[i : i + batch_size]) < batch_size and drop_last_batch: continue expected.append(_examples_to_batch(all_examples[i : i + batch_size])) assert next(iter(dataset.iter(batch_size, drop_last_batch=drop_last_batch))) == expected[0] assert list(dataset.iter(batch_size, drop_last_batch=drop_last_batch)) == expected def test_iterable_dataset_info(): info = DatasetInfo(description="desc", citation="@article{}", size_in_bytes=42) ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable, info=info) assert dataset.info == info assert dataset.description == info.description assert dataset.citation == info.citation assert dataset.size_in_bytes == info.size_in_bytes def test_iterable_dataset_set_epoch(dataset: IterableDataset): assert dataset._epoch == 0 dataset.set_epoch(42) assert dataset._epoch == 42 @pytest.mark.parametrize("seed", [None, 42, 1337]) @pytest.mark.parametrize("epoch", [None, 0, 1, 10]) def test_iterable_dataset_set_epoch_of_shuffled_dataset(dataset: IterableDataset, seed, epoch): buffer_size = 10 shuffled_dataset = dataset.shuffle(seed, buffer_size=buffer_size) base_generator = shuffled_dataset._shuffling.generator if epoch is not None: shuffled_dataset.set_epoch(epoch) effective_generator = shuffled_dataset._effective_generator() assert effective_generator is not None if epoch is None or epoch == 0: assert is_rng_equal(base_generator, shuffled_dataset._effective_generator()) else: assert not is_rng_equal(base_generator, shuffled_dataset._effective_generator()) effective_seed = deepcopy(base_generator).integers(0, 1 << 63) - epoch assert is_rng_equal(np.random.default_rng(effective_seed), shuffled_dataset._effective_generator()) def test_iterable_dataset_map( dataset: IterableDataset, ): func = lambda x: {"id+1": x["id"] + 1} # noqa: E731 mapped_dataset = dataset.map(func) assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable) assert mapped_dataset._ex_iterable.function is func assert mapped_dataset._ex_iterable.batched is False assert next(iter(mapped_dataset)) == {**next(iter(dataset)), **func(next(iter(generate_examples_fn()))[1])} def test_iterable_dataset_map_batched( dataset: IterableDataset, ): func = lambda x: {"id+1": [i + 1 for i in x["id"]]} # noqa: E731 batch_size = 3 dataset = dataset.map(func, batched=True, batch_size=batch_size) assert isinstance(dataset._ex_iterable, 
MappedExamplesIterable) assert dataset._ex_iterable.function is func assert dataset._ex_iterable.batch_size == batch_size assert next(iter(dataset)) == {"id": 0, "id+1": 1} def test_iterable_dataset_map_complex_features( dataset: IterableDataset, ): # https://github.com/huggingface/datasets/issues/3505 ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"}) features = Features( { "id": Value("int64"), "label": Value("string"), } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) dataset = dataset.cast_column("label", ClassLabel(names=["negative", "positive"])) dataset = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) assert isinstance(dataset._ex_iterable, MappedExamplesIterable) features["label"] = ClassLabel(names=["negative", "positive"]) assert [{k: v for k, v in ex.items() if k != "id+1"} for ex in dataset] == [ features.encode_example(ex) for _, ex in ex_iterable ] def test_iterable_dataset_map_with_features(dataset: IterableDataset) -> None: # https://github.com/huggingface/datasets/issues/3888 ex_iterable = ExamplesIterable(generate_examples_fn, {"label": "positive"}) features_before_map = Features( { "id": Value("int64"), "label": Value("string"), } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features_before_map)) assert dataset.info.features is not None assert dataset.info.features == features_before_map features_after_map = Features( { "id": Value("int64"), "label": Value("string"), "target": Value("string"), } ) dataset = dataset.map(lambda x: {"target": x["label"]}, features=features_after_map) assert dataset.info.features is not None assert dataset.info.features == features_after_map def test_iterable_dataset_map_with_fn_kwargs(dataset: IterableDataset) -> None: fn_kwargs = {"y": 1} mapped_dataset = dataset.map(lambda x, y: {"id+y": x["id"] + y}, fn_kwargs=fn_kwargs) assert mapped_dataset._ex_iterable.batched is False assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1} batch_size = 3 mapped_dataset = dataset.map( lambda x, y: {"id+y": [i + y for i in x["id"]]}, batched=True, batch_size=batch_size, fn_kwargs=fn_kwargs ) assert isinstance(mapped_dataset._ex_iterable, MappedExamplesIterable) assert mapped_dataset._ex_iterable.batch_size == batch_size assert next(iter(mapped_dataset)) == {"id": 0, "id+y": 1} def test_iterable_dataset_filter(dataset: IterableDataset) -> None: fn_kwargs = {"y": 1} filtered_dataset = dataset.filter(lambda x, y: x["id"] == y, fn_kwargs=fn_kwargs) assert filtered_dataset._ex_iterable.batched is False assert next(iter(filtered_dataset)) == {"id": 1} @pytest.mark.parametrize("seed", [42, 1337, 101010, 123456]) @pytest.mark.parametrize("epoch", [None, 0, 1]) def test_iterable_dataset_shuffle(dataset: IterableDataset, seed, epoch): buffer_size = 3 dataset = deepcopy(dataset) dataset._ex_iterable.kwargs["filepaths"] = ["0.txt", "1.txt"] dataset = dataset.shuffle(seed, buffer_size=buffer_size) assert isinstance(dataset._shuffling, ShufflingConfig) assert isinstance(dataset._shuffling.generator, np.random.Generator) assert is_rng_equal(dataset._shuffling.generator, np.random.default_rng(seed)) # Effective seed is sum of seed and epoch if epoch is None or epoch == 0: effective_seed = seed else: dataset.set_epoch(epoch) effective_seed = np.random.default_rng(seed).integers(0, 1 << 63) - epoch # Shuffling adds a shuffle buffer expected_first_example_index = next( iter(BufferShuffledExamplesIterable._iter_random_indices(np.random.default_rng(effective_seed), buffer_size)) ) assert 
isinstance(dataset._ex_iterable, BufferShuffledExamplesIterable) # It also shuffles the underlying examples iterable expected_ex_iterable = ExamplesIterable( generate_examples_fn, {"filepaths": ["0.txt", "1.txt"]} ).shuffle_data_sources(np.random.default_rng(effective_seed)) assert isinstance(dataset._ex_iterable.ex_iterable, ExamplesIterable) assert next(iter(dataset)) == list(islice(expected_ex_iterable, expected_first_example_index + 1))[-1][1] @pytest.mark.parametrize( "features", [ None, Features( { "id": Value("int64"), "label": Value("int64"), } ), Features( { "id": Value("int64"), "label": ClassLabel(names=["negative", "positive"]), } ), ], ) def test_iterable_dataset_features(features): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) if features: expected = [features.encode_example(x) for _, x in ex_iterable] else: expected = [x for _, x in ex_iterable] assert list(dataset) == expected def test_iterable_dataset_features_cast_to_python(): ex_iterable = ExamplesIterable( generate_examples_fn, {"timestamp": pd.Timestamp(2020, 1, 1), "array": np.ones(5), "n": 1} ) features = Features( { "id": Value("int64"), "timestamp": Value("timestamp[us]"), "array": [Value("int64")], } ) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) assert list(dataset) == [{"timestamp": pd.Timestamp(2020, 1, 1).to_pydatetime(), "array": [1] * 5, "id": 0}] @pytest.mark.parametrize("format_type", [None, "torch", "python", "tf", "tensorflow", "np", "numpy", "jax"]) def test_iterable_dataset_with_format(dataset: IterableDataset, format_type): formatted_dataset = dataset.with_format(format_type) assert formatted_dataset._formatting.format_type == get_format_type_from_alias(format_type) @require_torch def test_iterable_dataset_is_torch_iterable_dataset(dataset: IterableDataset): from torch.utils.data import DataLoader, _DatasetKind dataloader = DataLoader(dataset) assert dataloader._dataset_kind == _DatasetKind.Iterable out = list(dataloader) assert len(out) == DEFAULT_N_EXAMPLES @pytest.mark.parametrize("n", [0, 2, int(1e10)]) def test_iterable_dataset_skip(dataset: IterableDataset, n): skip_dataset = dataset.skip(n) assert isinstance(skip_dataset._ex_iterable, SkipExamplesIterable) assert skip_dataset._ex_iterable.n == n assert list(skip_dataset) == list(dataset)[n:] @pytest.mark.parametrize("n", [0, 2, int(1e10)]) def test_iterable_dataset_take(dataset: IterableDataset, n): take_dataset = dataset.take(n) assert isinstance(take_dataset._ex_iterable, TakeExamplesIterable) assert take_dataset._ex_iterable.n == n assert list(take_dataset) == list(dataset)[:n] @pytest.mark.parametrize("method", ["skip", "take"]) def test_iterable_dataset_shuffle_after_skip_or_take(method): seed = 42 n, n_shards = 3, 10 count = 7 ex_iterable = ExamplesIterable(generate_examples_fn, {"n": n, "filepaths": [f"{i}.txt" for i in range(n_shards)]}) dataset = IterableDataset(ex_iterable) dataset = dataset.skip(n) if method == "skip" else dataset.take(count) shuffled_dataset = dataset.shuffle(seed, buffer_size=DEFAULT_N_EXAMPLES) # shuffling a skip/take dataset should keep the same examples and don't shuffle the shards key = lambda x: f"{x['filepath']}_{x['id']}" # noqa: E731 assert sorted(dataset, key=key) == sorted(shuffled_dataset, key=key) def test_iterable_dataset_add_column(dataset_with_several_columns): new_column = list(range(DEFAULT_N_EXAMPLES)) new_dataset = dataset_with_several_columns.add_column("new_column", 
new_column) assert list(new_dataset) == [ {**example, "new_column": idx} for idx, example in enumerate(dataset_with_several_columns) ] new_dataset = new_dataset._resolve_features() assert "new_column" in new_dataset.column_names def test_iterable_dataset_rename_column(dataset_with_several_columns): new_dataset = dataset_with_several_columns.rename_column("id", "new_id") assert list(new_dataset) == [ {("new_id" if k == "id" else k): v for k, v in example.items()} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # rename the column if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().rename_column("id", "new_id") assert new_dataset.features is not None assert new_dataset.column_names is not None assert "id" not in new_dataset.column_names assert "new_id" in new_dataset.column_names def test_iterable_dataset_rename_columns(dataset_with_several_columns): column_mapping = {"id": "new_id", "filepath": "filename"} new_dataset = dataset_with_several_columns.rename_columns(column_mapping) assert list(new_dataset) == [ {column_mapping.get(k, k): v for k, v in example.items()} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # rename the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().rename_columns(column_mapping) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) assert all(c in new_dataset.column_names for c in ["new_id", "filename"]) def test_iterable_dataset_remove_columns(dataset_with_several_columns): new_dataset = dataset_with_several_columns.remove_columns("id") assert list(new_dataset) == [ {k: v for k, v in example.items() if k != "id"} for example in dataset_with_several_columns ] assert new_dataset.features is None new_dataset = dataset_with_several_columns.remove_columns(["id", "filepath"]) assert list(new_dataset) == [ {k: v for k, v in example.items() if k != "id" and k != "filepath"} for example in dataset_with_several_columns ] assert new_dataset.features is None assert new_dataset.column_names is None # remove the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().remove_columns(["id", "filepath"]) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c not in new_dataset.features for c in ["id", "filepath"]) assert all(c not in new_dataset.column_names for c in ["id", "filepath"]) def test_iterable_dataset_select_columns(dataset_with_several_columns): new_dataset = dataset_with_several_columns.select_columns("id") assert list(new_dataset) == [ {k: v for k, v in example.items() if k == "id"} for example in dataset_with_several_columns ] assert new_dataset.features is None new_dataset = dataset_with_several_columns.select_columns(["id", "filepath"]) assert list(new_dataset) == [ {k: v for k, v in example.items() if k in ("id", "filepath")} for example in dataset_with_several_columns ] assert new_dataset.features is None # select the columns if ds.features was not None new_dataset = dataset_with_several_columns._resolve_features().select_columns(["id", "filepath"]) assert new_dataset.features is not None assert new_dataset.column_names is not None assert all(c in new_dataset.features for c in ["id", "filepath"]) assert all(c in new_dataset.column_names for c in 
["id", "filepath"]) def test_iterable_dataset_cast_column(): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) features = Features({"id": Value("int64"), "label": Value("int64")}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) casted_dataset = dataset.cast_column("label", Value("bool")) casted_features = features.copy() casted_features["label"] = Value("bool") assert list(casted_dataset) == [casted_features.encode_example(ex) for _, ex in ex_iterable] def test_iterable_dataset_cast(): ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 10}) features = Features({"id": Value("int64"), "label": Value("int64")}) dataset = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) new_features = Features({"id": Value("int64"), "label": Value("bool")}) casted_dataset = dataset.cast(new_features) assert list(casted_dataset) == [new_features.encode_example(ex) for _, ex in ex_iterable] def test_iterable_dataset_resolve_features(): ex_iterable = ExamplesIterable(generate_examples_fn, {}) dataset = IterableDataset(ex_iterable) assert dataset.features is None assert dataset.column_names is None dataset = dataset._resolve_features() assert dataset.features == Features( { "id": Value("int64"), } ) assert dataset.column_names == ["id"] def test_iterable_dataset_resolve_features_keep_order(): def gen(): yield from zip(range(3), [{"a": 1}, {"c": 1}, {"b": 1}]) ex_iterable = ExamplesIterable(gen, {}) dataset = IterableDataset(ex_iterable)._resolve_features() # columns appear in order of appearance in the dataset assert list(dataset.features) == ["a", "c", "b"] assert dataset.column_names == ["a", "c", "b"] def test_iterable_dataset_with_features_fill_with_none(): def gen(): yield from zip(range(2), [{"a": 1}, {"b": 1}]) ex_iterable = ExamplesIterable(gen, {}) info = DatasetInfo(features=Features({"a": Value("int32"), "b": Value("int32")})) dataset = IterableDataset(ex_iterable, info=info) assert list(dataset) == [{"a": 1, "b": None}, {"b": 1, "a": None}] def test_concatenate_datasets(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) dataset2 = IterableDataset(ex_iterable2) concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert list(concatenated_dataset) == list(dataset1) + list(dataset2) def test_concatenate_datasets_resolves_features(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label": 5}) dataset2 = IterableDataset(ex_iterable2) concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert concatenated_dataset.features is not None assert sorted(concatenated_dataset.features) == ["id", "label"] def test_concatenate_datasets_with_different_columns(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {}) dataset2 = IterableDataset(ex_iterable2) # missing column "label" -> it should be replaced with nulls extended_dataset2_list = [{"label": None, **x} for x in dataset2] concatenated_dataset = concatenate_datasets([dataset1, dataset2]) assert list(concatenated_dataset) == list(dataset1) + extended_dataset2_list # change order concatenated_dataset = concatenate_datasets([dataset2, dataset1]) assert list(concatenated_dataset) == extended_dataset2_list + 
list(dataset1) def test_concatenate_datasets_axis_1(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) dataset2 = IterableDataset(ex_iterable2) with pytest.raises(ValueError): # column "id" is duplicated -> raise an error concatenate_datasets([dataset1, dataset2], axis=1) concatenated_dataset = concatenate_datasets([dataset1, dataset2.remove_columns("id")], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, dataset2)] def test_concatenate_datasets_axis_1_resolves_features(): ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10}) dataset1 = IterableDataset(ex_iterable1) ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5}) dataset2 = IterableDataset(ex_iterable2).remove_columns("id") concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) assert concatenated_dataset.features is not None assert sorted(concatenated_dataset.features) == ["id", "label1", "label2"] def test_concatenate_datasets_axis_1_with_different_lengths(): n1 = 10 ex_iterable1 = ExamplesIterable(generate_examples_fn, {"label1": 10, "n": n1}) dataset1 = IterableDataset(ex_iterable1) n2 = 5 ex_iterable2 = ExamplesIterable(generate_examples_fn, {"label2": 5, "n": n2}) dataset2 = IterableDataset(ex_iterable2).remove_columns("id") # missing rows -> they should be replaced with nulls extended_dataset2_list = list(dataset2) + [{"label2": None}] * (n1 - n2) concatenated_dataset = concatenate_datasets([dataset1, dataset2], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(dataset1, extended_dataset2_list)] # change order concatenated_dataset = concatenate_datasets([dataset2, dataset1], axis=1) assert list(concatenated_dataset) == [{**x, **y} for x, y in zip(extended_dataset2_list, dataset1)] @pytest.mark.parametrize( "probas, seed, expected_length, stopping_strategy", [ (None, None, 3 * (DEFAULT_N_EXAMPLES - 1) + 1, "first_exhausted"), ([1, 0, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), ([0, 1, 0], None, DEFAULT_N_EXAMPLES, "first_exhausted"), ([0.2, 0.5, 0.3], 42, None, "first_exhausted"), ([0.1, 0.1, 0.8], 1337, None, "first_exhausted"), ([0.5, 0.2, 0.3], 101010, None, "first_exhausted"), (None, None, 3 * DEFAULT_N_EXAMPLES, "all_exhausted"), ([0.2, 0.5, 0.3], 42, None, "all_exhausted"), ([0.1, 0.1, 0.8], 1337, None, "all_exhausted"), ([0.5, 0.2, 0.3], 101010, None, "all_exhausted"), ], ) def test_interleave_datasets(dataset: IterableDataset, probas, seed, expected_length, stopping_strategy): d1 = dataset d2 = dataset.map(lambda x: {"id+1": x["id"] + 1, **x}) d3 = dataset.with_format("python") datasets = [d1, d2, d3] merged_dataset = interleave_datasets( datasets, probabilities=probas, seed=seed, stopping_strategy=stopping_strategy ) def fill_default(example): return {"id": None, "id+1": None, **example} # Check the examples iterable assert isinstance( merged_dataset._ex_iterable, (CyclingMultiSourcesExamplesIterable, RandomlyCyclingMultiSourcesExamplesIterable) ) # Check that it is deterministic if seed is not None: merged_dataset2 = interleave_datasets( [d1, d2, d3], probabilities=probas, seed=seed, stopping_strategy=stopping_strategy ) assert list(merged_dataset) == list(merged_dataset2) # Check features assert merged_dataset.features == Features({"id": Value("int64"), "id+1": Value("int64")}) # Check first example if seed is not None: rng = np.random.default_rng(seed) i = 
next(iter(RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas))) assert next(iter(merged_dataset)) == fill_default(next(iter(datasets[i]))) else: assert any(next(iter(merged_dataset)) == fill_default(next(iter(dataset))) for dataset in datasets) # Compute length it case it's random if expected_length is None: expected_length = 0 counts = np.array([len(list(d)) for d in datasets]) bool_strategy_func = np.all if stopping_strategy == "all_exhausted" else np.any rng = np.random.default_rng(seed) for i in RandomlyCyclingMultiSourcesExamplesIterable._iter_random_indices(rng, len(datasets), p=probas): counts[i] -= 1 expected_length += 1 if bool_strategy_func(counts <= 0): break # Check length assert len(list(merged_dataset)) == expected_length def test_interleave_datasets_with_features( dataset: IterableDataset, ): features = Features( { "id": Value("int64"), "label": ClassLabel(names=["negative", "positive"]), } ) ex_iterable = ExamplesIterable(generate_examples_fn, {"label": 0}) dataset_with_features = IterableDataset(ex_iterable, info=DatasetInfo(features=features)) merged_dataset = interleave_datasets([dataset, dataset_with_features]) assert merged_dataset.features == features def test_interleave_datasets_with_oversampling(): # Test hardcoded results d1 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [0, 1, 2]])), {})) d2 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [10, 11, 12, 13]])), {})) d3 = IterableDataset(ExamplesIterable((lambda: (yield from [(i, {"a": i}) for i in [20, 21, 22, 23, 24]])), {})) expected_values = [0, 10, 20, 1, 11, 21, 2, 12, 22, 0, 13, 23, 1, 10, 24] # Check oversampling strategy without probabilities assert [x["a"] for x in interleave_datasets([d1, d2, d3], stopping_strategy="all_exhausted")] == expected_values # Check oversampling strategy with probabilities expected_values = [20, 0, 21, 10, 1, 22, 23, 24, 2, 0, 1, 20, 11, 21, 2, 0, 12, 1, 22, 13] values = [ x["a"] for x in interleave_datasets( [d1, d2, d3], probabilities=[0.5, 0.2, 0.3], seed=42, stopping_strategy="all_exhausted" ) ] assert values == expected_values @require_torch def test_with_format_torch(dataset_with_several_columns: IterableDataset): import torch dset = dataset_with_several_columns.with_format(type="torch") example = next(iter(dset)) batch = next(iter(dset.iter(batch_size=3))) assert len(example) == 3 assert isinstance(example["id"], torch.Tensor) assert list(example["id"].shape) == [] assert example["id"].item() == 0 assert isinstance(batch["id"], torch.Tensor) assert isinstance(example["filepath"], list) assert isinstance(example["filepath"][0], str) assert example["filepath"][0] == "data0.txt" assert isinstance(batch["filepath"], list) assert isinstance(example["metadata"], dict) assert isinstance(example["metadata"]["sources"], list) assert isinstance(example["metadata"]["sources"][0], str) assert isinstance(batch["metadata"], list) @require_tf def test_with_format_tf(dataset_with_several_columns: IterableDataset): import tensorflow as tf dset = dataset_with_several_columns.with_format(type="tensorflow") example = next(iter(dset)) batch = next(iter(dset.iter(batch_size=3))) assert isinstance(example["id"], tf.Tensor) assert list(example["id"].shape) == [] assert example["id"].numpy().item() == 0 assert isinstance(batch["id"], tf.Tensor) assert isinstance(example["filepath"], tf.Tensor) assert example["filepath"][0] == b"data0.txt" assert isinstance(batch["filepath"], tf.Tensor) 
assert isinstance(example["metadata"], dict) assert isinstance(example["metadata"]["sources"], tf.Tensor) assert isinstance(batch["metadata"], list) def test_map_array_are_not_converted_back_to_lists(dataset: IterableDataset): def func(example): return {"array": np.array([1, 2, 3])} dset_test = dataset.map(func) example = next(iter(dset_test)) # not aligned with Dataset.map because we don't convert back to lists after map() assert isinstance(example["array"], np.ndarray) def test_formatted_map(dataset: IterableDataset): dataset = dataset.with_format("np") assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) dataset = dataset.with_format(None) assert isinstance(next(dataset.iter(batch_size=3))["id"], list) def add_one_numpy(example): assert isinstance(example["id"], np.ndarray) return {"id": example["id"] + 1} dataset = dataset.with_format("np") dataset = dataset.map(add_one_numpy, batched=True) assert isinstance(next(dataset.iter(batch_size=3))["id"], np.ndarray) dataset = dataset.with_format(None) assert isinstance(next(dataset.iter(batch_size=3))["id"], list) @pytest.mark.parametrize("n_shards1, n_shards2, num_workers", [(2, 1, 1), (2, 2, 2), (1, 3, 1), (4, 3, 3)]) def test_interleave_dataset_with_sharding(n_shards1, n_shards2, num_workers): from torch.utils.data import DataLoader ex_iterable1 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-1.txt" for i in range(n_shards1)]}) dataset1 = IterableDataset(ex_iterable1).with_format("torch") ex_iterable2 = ExamplesIterable(generate_examples_fn, {"filepaths": [f"{i}-2.txt" for i in range(n_shards2)]}) dataset2 = IterableDataset(ex_iterable2).with_format("torch") dataset_merged = interleave_datasets([dataset1, dataset2], stopping_strategy="first_exhausted") assert dataset_merged.n_shards == min(n_shards1, n_shards2) dataloader = DataLoader(dataset_merged, batch_size=None, num_workers=num_workers) result = list(dataloader) expected_length = 2 * min( len([example for _, example in ex_iterable1]), len([example for _, example in ex_iterable2]) ) # some samples may be missing because the stopping strategy is applied per process assert expected_length - num_workers <= len(result) <= expected_length assert len(result) == len({str(x) for x in result}) def filter_func(batch): return batch["id"] == 4 def map_func(batch): batch["id"] *= 2 return batch def test_pickle_after_many_transforms(dataset_with_several_columns): dataset = dataset_with_several_columns dataset = dataset.remove_columns(["filepath"]) dataset = dataset.take(5) dataset = dataset.map(map_func) dataset = dataset.shuffle() dataset = dataset.skip(1) dataset = dataset.filter(filter_func) dataset = dataset.add_column("additional_col", ["something"]) dataset = dataset.rename_column("metadata", "metadata1") dataset = dataset.rename_columns({"id": "id1", "metadata1": "metadata2"}) dataset = dataset.select_columns(["id1", "additional_col"]) unpickled_dataset = pickle.loads(pickle.dumps(dataset)) assert list(unpickled_dataset) == list(dataset)
datasets/tests/test_iterable_dataset.py/0
{ "file_path": "datasets/tests/test_iterable_dataset.py", "repo_id": "datasets", "token_count": 36328 }
185
import unittest from unittest.mock import patch import pytest from pytest import CaptureFixture from datasets.utils import ( are_progress_bars_disabled, disable_progress_bars, enable_progress_bars, tqdm, ) class TestTqdmUtils(unittest.TestCase): @pytest.fixture(autouse=True) def capsys(self, capsys: CaptureFixture) -> None: """Workaround to make capsys work in unittest framework. Capsys is a convenient pytest fixture to capture stdout. See https://waylonwalker.com/pytest-capsys/. Taken from https://github.com/pytest-dev/pytest/issues/2504#issuecomment-309475790. """ self.capsys = capsys def setUp(self) -> None: """Get verbosity to set it back after the tests.""" self._previous_are_progress_bars_disabled = are_progress_bars_disabled() return super().setUp() def tearDown(self) -> None: """Set back progress bars verbosity as before testing.""" if self._previous_are_progress_bars_disabled: disable_progress_bars() else: enable_progress_bars() @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_helpers(self) -> None: """Test helpers to enable/disable progress bars.""" disable_progress_bars() self.assertTrue(are_progress_bars_disabled()) enable_progress_bars() self.assertFalse(are_progress_bars_disabled()) @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", True) def test_cannot_enable_tqdm_when_env_variable_is_set(self) -> None: """ Test helpers cannot enable/disable progress bars when `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. """ disable_progress_bars() self.assertTrue(are_progress_bars_disabled()) with self.assertWarns(UserWarning): enable_progress_bars() self.assertTrue(are_progress_bars_disabled()) # Still disabled ! @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", False) def test_cannot_disable_tqdm_when_env_variable_is_set(self) -> None: """ Test helpers cannot enable/disable progress bars when `HF_DATASETS_DISABLE_PROGRESS_BARS` is set. """ enable_progress_bars() self.assertFalse(are_progress_bars_disabled()) with self.assertWarns(UserWarning): disable_progress_bars() self.assertFalse(are_progress_bars_disabled()) # Still enabled ! @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_disabled(self) -> None: """Test TQDM not outputting anything when globally disabled.""" disable_progress_bars() for _ in tqdm(range(10)): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_disabled_cannot_be_forced(self) -> None: """Test TQDM cannot be forced when globally disabled.""" disable_progress_bars() for _ in tqdm(range(10), disable=False): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_can_be_disabled_when_globally_enabled(self) -> None: """Test TQDM can still be locally disabled even when globally enabled.""" enable_progress_bars() for _ in tqdm(range(10), disable=True): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertEqual(captured.err, "") @patch("datasets.utils._tqdm.HF_DATASETS_DISABLE_PROGRESS_BARS", None) def test_tqdm_enabled(self) -> None: """Test TQDM work normally when globally enabled.""" enable_progress_bars() for _ in tqdm(range(10)): pass captured = self.capsys.readouterr() self.assertEqual(captured.out, "") self.assertIn("10/10", captured.err) # tqdm log
datasets/tests/test_tqdm.py/0
{ "file_path": "datasets/tests/test_tqdm.py", "repo_id": "datasets", "token_count": 1804 }
186
# The Exploration/Exploitation trade-off [[exp-exp-tradeoff]] Finally, before looking at the different methods to solve Reinforcement Learning problems, we must cover one more very important topic: *the exploration/exploitation trade-off.* - *Exploration* is exploring the environment by trying random actions in order to **find more information about the environment.** - *Exploitation* is **exploiting known information to maximize the reward.** Remember, the goal of our RL agent is to maximize the expected cumulative reward. However, **we can fall into a common trap**. Let’s take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_1.jpg" alt="Exploration" width="100%"> In this game, our mouse can have an **infinite amount of small cheese** (+1 each). But at the top of the maze, there is a gigantic sum of cheese (+1000). However, if we only focus on exploitation, our agent will never reach the gigantic sum of cheese. Instead, it will only exploit **the nearest source of rewards,** even if this source is small (exploitation). But if our agent does a little bit of exploration, it can **discover the big reward** (the pile of big cheese). This is what we call the exploration/exploitation trade-off. We need to balance how much we **explore the environment** and how much we **exploit what we know about the environment.** Therefore, we must **define a rule that helps to handle this trade-off**. We’ll see the different ways to handle it in the future units. If it’s still confusing, **think of a real problem: the choice of picking a restaurant:** <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/exp_2.jpg" alt="Exploration"> <figcaption>Source: <a href="https://inst.eecs.berkeley.edu/~cs188/sp20/assets/lecture/lec15_6up.pdf"> Berkley AI Course</a> </figcaption> </figure> - *Exploitation*: You go to the same one that you know is good every day and **take the risk to miss another better restaurant.** - *Exploration*: Try restaurants you never went to before, with the risk of having a bad experience **but the probable opportunity of a fantastic experience.** To recap: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/expexpltradeoff.jpg" alt="Exploration Exploitation Tradeoff" width="100%">
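One simple rule you will meet later in the course is the *epsilon-greedy strategy*: act randomly with a small probability (explore), and otherwise pick the action you currently believe is best (exploit). Here is a minimal illustrative sketch (the function and variable names are ours, not part of the course code):

```python
import random

def epsilon_greedy_action(q_values, epsilon=0.1):
    """Pick an action index given a list of estimated action values.

    With probability `epsilon` we explore (random action),
    otherwise we exploit (the action with the highest estimated value).
    """
    if random.random() < epsilon:
        return random.randrange(len(q_values))  # explore
    return max(range(len(q_values)), key=lambda a: q_values[a])  # exploit
```

Decaying `epsilon` during training is a common way to explore a lot at first and exploit more once the value estimates become reliable.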
deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx/0
{ "file_path": "deep-rl-class/units/en/unit1/exp-exp-tradeoff.mdx", "repo_id": "deep-rl-class", "token_count": 699 }
187
# Monte Carlo vs Temporal Difference Learning [[mc-vs-td]] The last thing we need to discuss before diving into Q-Learning is the two learning strategies. Remember that an RL agent **learns by interacting with its environment.** The idea is that **given the experience and the received reward, the agent will update its value function or policy.** Monte Carlo and Temporal Difference Learning are two different **strategies on how to train our value function or our policy function.** Both of them **use experience to solve the RL problem.** On one hand, Monte Carlo uses **an entire episode of experience before learning.** On the other hand, Temporal Difference uses **only a step ( \\(S_t, A_t, R_{t+1}, S_{t+1}\\) ) to learn.** We'll explain both of them **using a value-based method example.** ## Monte Carlo: learning at the end of the episode [[monte-carlo]] Monte Carlo waits until the end of the episode, calculates \\(G_t\\) (return) and uses it as **a target for updating \\(V(S_t)\\).** So it requires a **complete episode of interaction before updating our value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/monte-carlo-approach.jpg" alt="Monte Carlo"/> If we take an example: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-2.jpg" alt="Monte Carlo"/> - We always start the episode **at the same starting point.** - **The agent takes actions using the policy**. For instance, using an Epsilon Greedy Strategy, a policy that alternates between exploration (random actions) and exploitation. - We get **the reward and the next state.** - We terminate the episode if the cat eats the mouse or if the mouse moves > 10 steps. - At the end of the episode, **we have a list of State, Actions, Rewards, and Next States tuples** For instance [[State tile 3 bottom, Go Left, +1, State tile 2 bottom], [State tile 2 bottom, Go Left, +0, State tile 1 bottom]...] - **The agent will sum the total rewards \\(G_t\\)** (to see how well it did). - It will then **update \\(V(s_t)\\) based on the formula** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3.jpg" alt="Monte Carlo"/> - Then **start a new game with this new knowledge** By running more and more episodes, **the agent will learn to play better and better.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-3p.jpg" alt="Monte Carlo"/> For instance, if we train a state-value function using Monte Carlo: - We initialize our value function **so that it returns 0 value for each state** - Our learning rate (lr) is 0.1 and our discount rate is 1 (= no discount) - Our mouse **explores the environment and takes random actions** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4.jpg" alt="Monte Carlo"/> - The mouse made more than 10 steps, so the episode ends . 
<img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-4p.jpg" alt="Monte Carlo"/> - We have a list of state, action, rewards, next_state, **we need to calculate the return \\(G{t=0}\\)** \\(G_t = R_{t+1} + R_{t+2} + R_{t+3} ...\\) (for simplicity, we don't discount the rewards) \\(G_0 = R_{1} + R_{2} + R_{3}…\\) \\(G_0 = 1 + 0 + 0 + 0 + 0 + 0 + 1 + 1 + 0 + 0\\) \\(G_0 = 3\\) - We can now compute the **new** \\(V(S_0)\\): <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5.jpg" alt="Monte Carlo"/> \\(V(S_0) = V(S_0) + lr * [G_0 — V(S_0)]\\) \\(V(S_0) = 0 + 0.1 * [3 – 0]\\) \\(V(S_0) = 0.3\\) <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/MC-5p.jpg" alt="Monte Carlo"/> ## Temporal Difference Learning: learning at each step [[td-learning]] **Temporal Difference, on the other hand, waits for only one interaction (one step) \\(S_{t+1}\\)** to form a TD target and update \\(V(S_t)\\) using \\(R_{t+1}\\) and \\( \gamma * V(S_{t+1})\\). The idea with **TD is to update the \\(V(S_t)\\) at each step.** But because we didn't experience an entire episode, we don't have \\(G_t\\) (expected return). Instead, **we estimate \\(G_t\\) by adding \\(R_{t+1}\\) and the discounted value of the next state.** This is called bootstrapping. It's called this **because TD bases its update in part on an existing estimate \\(V(S_{t+1})\\) and not a complete sample \\(G_t\\).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1.jpg" alt="Temporal Difference"/> This method is called TD(0) or **one-step TD (update the value function after any individual step).** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-1p.jpg" alt="Temporal Difference"/> If we take the same example, <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2.jpg" alt="Temporal Difference"/> - We initialize our value function so that it returns 0 value for each state. - Our learning rate (lr) is 0.1, and our discount rate is 1 (no discount). - Our mouse begins to explore the environment and takes a random action: **going to the left** - It gets a reward \\(R_{t+1} = 1\\) since **it eats a piece of cheese** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-2p.jpg" alt="Temporal Difference"/> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3.jpg" alt="Temporal Difference"/> We can now update \\(V(S_0)\\): New \\(V(S_0) = V(S_0) + lr * [R_1 + \gamma * V(S_1) - V(S_0)]\\) New \\(V(S_0) = 0 + 0.1 * [1 + 1 * 0–0]\\) New \\(V(S_0) = 0.1\\) So we just updated our value function for State 0. 
Now we **continue to interact with this environment with our updated value function.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/TD-3p.jpg" alt="Temporal Difference"/> To summarize: - With *Monte Carlo*, we update the value function from a complete episode, and so we **use the actual accurate discounted return of this episode.** - With *TD Learning*, we update the value function from a step, and we replace \\(G_t\\), which we don't know, with **an estimated return called the TD target.** <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit3/Summary.jpg" alt="Summary"/>
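The two update rules summarized above translate almost directly into code. Below is a minimal illustrative sketch (ours, not code from the course) of both updates for a tabular state-value function `V`:

```python
def monte_carlo_update(V, episode, lr=0.1, gamma=1.0):
    """Monte Carlo: update V only once the episode is complete.

    `episode` is a list of (state, reward) pairs, where `reward` is the
    reward received after leaving `state`. With gamma=1.0 this matches
    the undiscounted example above.
    """
    G = 0.0
    for state, reward in reversed(episode):  # work backwards to accumulate returns
        G = reward + gamma * G
        V[state] = V[state] + lr * (G - V[state])


def td0_update(V, state, reward, next_state, lr=0.1, gamma=1.0):
    """TD(0): update V after a single step using the bootstrapped TD target."""
    td_target = reward + gamma * V[next_state]
    V[state] = V[state] + lr * (td_target - V[state])
```

Monte Carlo waits for the full list of rewards before touching `V`, while TD(0) can be called after every single interaction.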
deep-rl-class/units/en/unit2/mc-vs-td.mdx/0
{ "file_path": "deep-rl-class/units/en/unit2/mc-vs-td.mdx", "repo_id": "deep-rl-class", "token_count": 2316 }
188
# Deep Q-Learning [[deep-q-learning]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/thumbnail.jpg" alt="Unit 3 thumbnail" width="100%"> In the last unit, we learned our first reinforcement learning algorithm: Q-Learning, **implemented it from scratch**, and trained it in two environments, FrozenLake-v1 ☃️ and Taxi-v3 🚕. We got excellent results with this simple algorithm, but these environments were relatively simple because the **state space was discrete and small** (16 different states for FrozenLake-v1 and 500 for Taxi-v3). For comparison, the state space in Atari games can **contain \\(10^{9}\\) to \\(10^{11}\\) states**. But as we'll see, producing and updating a **Q-table can become ineffective in large state space environments.** So in this unit, **we'll study our first Deep Reinforcement Learning agent**: Deep Q-Learning. Instead of using a Q-table, Deep Q-Learning uses a Neural Network that takes a state and approximates Q-values for each action based on that state. And **we'll train it to play Space Invaders and other Atari environments using [RL-Zoo](https://github.com/DLR-RM/rl-baselines3-zoo)**, a training framework for RL using Stable-Baselines that provides scripts for training, evaluating agents, tuning hyperparameters, plotting results, and recording videos. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit4/atari-envs.gif" alt="Environments"/> So let’s get started! 🚀
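To make the idea of "a neural network instead of a Q-table" concrete, here is a minimal illustrative PyTorch sketch (not the architecture used in this unit, which applies convolutions to stacked Atari frames):

```python
import torch
import torch.nn as nn

class QNetwork(nn.Module):
    """Maps a state vector to one Q-value per action, replacing the Q-table."""

    def __init__(self, state_dim: int, n_actions: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(state_dim, 128),
            nn.ReLU(),
            nn.Linear(128, n_actions),
        )

    def forward(self, state: torch.Tensor) -> torch.Tensor:
        return self.net(state)  # shape: (batch_size, n_actions)

q_net = QNetwork(state_dim=4, n_actions=2)
q_values = q_net(torch.randn(1, 4))   # estimated Q-value for each action
action = int(q_values.argmax(dim=1))  # greedy action selection
```

Instead of looking up a cell in a table, we do a forward pass and pick the action with the highest predicted Q-value.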
deep-rl-class/units/en/unit3/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit3/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 437 }
189
# How do Unity ML-Agents work? [[how-mlagents-works]] Before training our agent, we need to understand **what ML-Agents is and how it works**. ## What is Unity ML-Agents? [[what-is-mlagents]] [Unity ML-Agents](https://github.com/Unity-Technologies/ml-agents) is a toolkit for the game engine Unity that **allows us to create environments using Unity or use pre-made environments to train our agents**. It’s developed by [Unity Technologies](https://unity.com/), the developers of Unity, one of the most famous Game Engines used by the creators of Firewatch, Cuphead, and Cities: Skylines. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/firewatch.jpeg" alt="Firewatch"/> <figcaption>Firewatch was made with Unity</figcaption> </figure> ## The six components [[six-components]] With Unity ML-Agents, you have six essential components: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/mlagents-1.png" alt="MLAgents"/> <figcaption>Source: <a href="https://unity-technologies.github.io/ml-agents/">Unity ML-Agents Documentation</a> </figcaption> </figure> - The first is the *Learning Environment*, which contains **the Unity scene (the environment) and the environment elements** (game characters). - The second is the *Python Low-level API*, which contains **the low-level Python interface for interacting and manipulating the environment**. It’s the API we use to launch the training. - Then, we have the *External Communicator* that **connects the Learning Environment (made with C#) with the low level Python API (Python)**. - The *Python trainers*: the **Reinforcement algorithms made with PyTorch (PPO, SAC…)**. - The *Gym wrapper*: to encapsulate the RL environment in a gym wrapper. - The *PettingZoo wrapper*: PettingZoo is the multi-agents version of the gym wrapper. ## Inside the Learning Component [[inside-learning-component]] Inside the Learning Component, we have **two important elements**: - The first is the *agent component*, the actor of the scene. We’ll **train the agent by optimizing its policy** (which will tell us what action to take in each state). The policy is called the *Brain*. - Finally, there is the *Academy*. This component **orchestrates agents and their decision-making processes**. Think of this Academy as a teacher who handles Python API requests. To better understand its role, let’s remember the RL process. This can be modeled as a loop that works like this: <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process.jpg" alt="The RL process" width="100%"> <figcaption>The RL Process: a loop of state, action, reward and next state</figcaption> <figcaption>Source: <a href="http://incompleteideas.net/book/RLbook2020.pdf">Reinforcement Learning: An Introduction, Richard Sutton and Andrew G. Barto</a></figcaption> </figure> Now, let’s imagine an agent learning to play a platform game. The RL process looks like this: <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit1/RL_process_game.jpg" alt="The RL process" width="100%"> - Our Agent receives **state \\(S_0\\)** from the **Environment** — we receive the first frame of our game (Environment). - Based on that **state \\(S_0\\),** the Agent takes **action \\(A_0\\)** — our Agent will move to the right. - The environment goes to a **new** **state \\(S_1\\)** — new frame. 
- The environment gives some **reward \\(R_1\\)** to the Agent — we’re not dead *(Positive Reward +1)*. This RL loop outputs a sequence of **state, action, reward and next state.** The goal of the agent is to **maximize the expected cumulative reward**. The Academy will be the one that will **send the order to our Agents and ensure that agents are in sync**: - Collect Observations - Select your action using your policy - Take the Action - Reset if you reached the max step or if you’re done. <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit5/academy.png" alt="The MLAgents Academy" width="100%"> Now that we understand how ML-Agents works, **we’re ready to train our agents.**
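In code, the loop the Academy orchestrates is the familiar agent–environment loop. Here is a rough, framework-agnostic sketch (Gym-style, not the actual ML-Agents Python API, which is covered in the hands-on):

```python
def run_episode(env, policy, max_steps=1000):
    """Generic agent-environment loop mirroring what the Academy orchestrates:
    collect observations, select an action with the policy, take the action,
    and reset when the episode ends or the step limit is reached."""
    transitions = []          # (state, action, reward, next_state) tuples for training
    obs = env.reset()         # collect initial observations
    for _ in range(max_steps):
        action = policy(obs)  # select your action using your policy
        next_obs, reward, done, info = env.step(action)  # take the action
        transitions.append((obs, action, reward, next_obs))
        obs = env.reset() if done else next_obs          # reset if you're done
    return transitions
```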
deep-rl-class/units/en/unit5/how-mlagents-works.mdx/0
{ "file_path": "deep-rl-class/units/en/unit5/how-mlagents-works.mdx", "repo_id": "deep-rl-class", "token_count": 1276 }
190
# Introduction [[introduction]] <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit0/thumbnail.png" alt="Thumbnail"/> Since the beginning of this course, we learned to train agents in a *single-agent system* where our agent was alone in its environment: it was **not cooperating or collaborating with other agents**. This worked great, and the single-agent system is useful for many applications. <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/patchwork.jpg" alt="Patchwork"/> <figcaption> A patchwork of all the environments you’ve trained your agents on since the beginning of the course </figcaption> </figure> But, as humans, **we live in a multi-agent world**. Our intelligence comes from interaction with other agents. And so, our **goal is to create agents that can interact with other humans and other agents**. Consequently, we must study how to train deep reinforcement learning agents in a *multi-agents system* to build robust agents that can adapt, collaborate, or compete. So today we’re going to **learn the basics of the fascinating topic of multi-agents reinforcement learning (MARL)**. And the most exciting part is that, during this unit, you’re going to train your first agents in a multi-agents system: **a 2vs2 soccer team that needs to beat the opponent team**. And you’re going to participate in **AI vs. AI challenge** where your trained agent will compete against other classmates’ agents every day and be ranked on a [new leaderboard](https://huggingface.co/spaces/huggingface-projects/AIvsAI-SoccerTwos). <figure> <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit10/soccertwos.gif" alt="SoccerTwos"/> <figcaption>This environment was made by the <a href="https://github.com/Unity-Technologies/ml-agents">Unity MLAgents Team</a></figcaption> </figure> So let’s get started!
deep-rl-class/units/en/unit7/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unit7/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 574 }
191
# Introduction [[introduction]] In this bonus unit, we'll reinforce what we learned in the first unit by teaching Huggy the Dog to fetch the stick and then [play with him directly in your browser](https://huggingface.co/spaces/ThomasSimonini/Huggy) 🐶 <img src="https://huggingface.co/datasets/huggingface-deep-rl-course/course-images/resolve/main/en/unit2/thumbnail.png" alt="Unit bonus 1 thumbnail" width="100%"> So let's get started 🚀
deep-rl-class/units/en/unitbonus1/introduction.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus1/introduction.mdx", "repo_id": "deep-rl-class", "token_count": 138 }
192
# Brief introduction to RL documentation In this advanced topic, we address the question: **how should we monitor and keep track of powerful reinforcement learning agents that we are training in the real world and interfacing with humans?** As machine learning systems have increasingly impacted modern life, the **call for the documentation of these systems has grown**. Such documentation can cover aspects such as the training data used — where it is stored, when it was collected, who was involved, etc. — or the model optimization framework — the architecture, evaluation metrics, relevant papers, etc. — and more. Today, model cards and datasheets are becoming increasingly available. For example, on the Hub (see documentation [here](https://huggingface.co/docs/hub/model-cards)). If you click on a [popular model on the Hub](https://huggingface.co/models), you can learn about its creation process. These model and data specific logs are designed to be completed when the model or dataset are created, leaving them to go un-updated when these models are built into evolving systems in the future. ​ ## Motivating Reward Reports Reinforcement learning systems are fundamentally designed to optimize based on measurements of reward and time. While the notion of a reward function can be mapped nicely to many well-understood fields of supervised learning (via a loss function), understanding of how machine learning systems evolve over time is limited. To that end, the authors introduce [*Reward Reports for Reinforcement Learning*](https://www.notion.so/Brief-introduction-to-RL-documentation-b8cbda5a6f5242338e0756e6bef72af4) (the pithy naming is designed to mirror the popular papers *Model Cards for Model Reporting* and *Datasheets for Datasets*). The goal is to propose a type of documentation focused on the **human factors of reward** and **time-varying feedback systems**. Building on the documentation frameworks for [model cards](https://arxiv.org/abs/1810.03993) and [datasheets](https://arxiv.org/abs/1803.09010) proposed by Mitchell et al. and Gebru et al., we argue the need for Reward Reports for AI systems. **Reward Reports** are living documents for proposed RL deployments that demarcate design choices. However, many questions remain about the applicability of this framework to different RL applications, roadblocks to system interpretability, and the resonances between deployed supervised machine learning systems and the sequential decision-making utilized in RL. At a minimum, Reward Reports are an opportunity for RL practitioners to deliberate on these questions and begin the work of deciding how to resolve them in practice. ​ ## Capturing temporal behavior with documentation The core piece specific to documentation designed for RL and feedback-driven ML systems is a *change-log*. The change-log updates information from the designer (changed training parameters, data, etc.) along with noticed changes from the user (harmful behavior, unexpected responses, etc.). The change log is accompanied by update triggers that encourage monitoring these effects. ## Contributing Some of the most impactful RL-driven systems are multi-stakeholder in nature and behind the closed doors of private corporations. These corporations are largely without regulation, so the burden of documentation falls on the public. If you are interested in contributing, we are building Reward Reports for popular machine learning systems on a public record on [GitHub](https://github.com/RewardReports/reward-reports). 
For further reading, you can visit the Reward Reports [paper](https://arxiv.org/abs/2204.10817) or look at [an example report](https://github.com/RewardReports/reward-reports/tree/main/examples). ## Author This section was written by <a href="https://twitter.com/natolambert"> Nathan Lambert </a>
deep-rl-class/units/en/unitbonus3/rl-documentation.mdx/0
{ "file_path": "deep-rl-class/units/en/unitbonus3/rl-documentation.mdx", "repo_id": "deep-rl-class", "token_count": 886 }
193
.PHONY: deps_table_update modified_only_fixup extra_style_checks quality style fixup fix-copies test test-examples # make sure to test the local checkout in scripts and not the pre-installed one (don't use quotes!) export PYTHONPATH = src check_dirs := examples scripts src tests utils benchmarks modified_only_fixup: $(eval modified_py_files := $(shell python utils/get_modified_files.py $(check_dirs))) @if test -n "$(modified_py_files)"; then \ echo "Checking/fixing $(modified_py_files)"; \ ruff check $(modified_py_files) --fix; \ ruff format $(modified_py_files);\ else \ echo "No library .py files were modified"; \ fi # Update src/diffusers/dependency_versions_table.py deps_table_update: @python setup.py deps_table_update deps_table_check_updated: @md5sum src/diffusers/dependency_versions_table.py > md5sum.saved @python setup.py deps_table_update @md5sum -c --quiet md5sum.saved || (printf "\nError: the version dependency table is outdated.\nPlease run 'make fixup' or 'make style' and commit the changes.\n\n" && exit 1) @rm md5sum.saved # autogenerating code autogenerate_code: deps_table_update # Check that the repo is in a good state repo-consistency: python utils/check_dummies.py python utils/check_repo.py python utils/check_inits.py # this target runs checks on all files quality: ruff check $(check_dirs) setup.py ruff format --check $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 --check_only python utils/check_doc_toc.py # Format source code automatically and check is there are any problems left that need manual fixing extra_style_checks: python utils/custom_init_isort.py python utils/check_doc_toc.py --fix_and_overwrite # this target runs checks on all files and potentially modifies some of them style: ruff check $(check_dirs) setup.py --fix ruff format $(check_dirs) setup.py doc-builder style src/diffusers docs/source --max_len 119 ${MAKE} autogenerate_code ${MAKE} extra_style_checks # Super fast fix and check target that only works on relevant modified files since the branch was made fixup: modified_only_fixup extra_style_checks autogenerate_code repo-consistency # Make marked copies of snippets of codes conform to the original fix-copies: python utils/check_copies.py --fix_and_overwrite python utils/check_dummies.py --fix_and_overwrite # Run tests for the library test: python -m pytest -n auto --dist=loadfile -s -v ./tests/ # Run tests for examples test-examples: python -m pytest -n auto --dist=loadfile -s -v ./examples/ # Release stuff pre-release: python utils/release.py pre-patch: python utils/release.py --patch post-release: python utils/release.py --post_release post-patch: python utils/release.py --post_release --patch
diffusers/Makefile/0
{ "file_path": "diffusers/Makefile", "repo_id": "diffusers", "token_count": 929 }
194
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # IP-Adapter [IP-Adapter](https://hf.co/papers/2308.06721) is a lightweight adapter that enables prompting a diffusion model with an image. This method decouples the cross-attention layers of the image and text features. The image features are generated from an image encoder. <Tip> Learn how to load an IP-Adapter checkpoint and image in the IP-Adapter [loading](../../using-diffusers/loading_adapters#ip-adapter) guide, and you can see how to use it in the [usage](../../using-diffusers/ip_adapter) guide. </Tip> ## IPAdapterMixin [[autodoc]] loaders.ip_adapter.IPAdapterMixin ## IPAdapterMaskProcessor [[autodoc]] image_processor.IPAdapterMaskProcessor
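For orientation, a typical usage pattern looks roughly like the sketch below (an illustrative example; see the loading and usage guides linked above for the canonical, up-to-date workflow):

```python
import torch
from diffusers import AutoPipelineForText2Image
from diffusers.utils import load_image

pipeline = AutoPipelineForText2Image.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

# Load IP-Adapter weights and choose how strongly the image prompt is applied.
pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
pipeline.set_ip_adapter_scale(0.6)

ip_image = load_image("path/or/url/to/your/image.png")  # replace with your own image

image = pipeline(
    prompt="best quality, high quality",
    ip_adapter_image=ip_image,  # the image prompt handled by the IP-Adapter
).images[0]
```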
diffusers/docs/source/en/api/loaders/ip_adapter.md/0
{ "file_path": "diffusers/docs/source/en/api/loaders/ip_adapter.md", "repo_id": "diffusers", "token_count": 339 }
195
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # Image variation The Stable Diffusion model can also generate variations from an input image. It uses a fine-tuned version of a Stable Diffusion model by [Justin Pinkney](https://www.justinpinkney.com/) from [Lambda](https://lambdalabs.com/). The original codebase can be found at [LambdaLabsML/lambda-diffusers](https://github.com/LambdaLabsML/lambda-diffusers#stable-diffusion-image-variations) and additional official checkpoints for image variation can be found at [lambdalabs/sd-image-variations-diffusers](https://huggingface.co/lambdalabs/sd-image-variations-diffusers). <Tip> Make sure to check out the Stable Diffusion [Tips](./overview#tips) section to learn how to explore the tradeoff between scheduler speed and quality, and how to reuse pipeline components efficiently! </Tip> ## StableDiffusionImageVariationPipeline [[autodoc]] StableDiffusionImageVariationPipeline - all - __call__ - enable_attention_slicing - disable_attention_slicing - enable_xformers_memory_efficient_attention - disable_xformers_memory_efficient_attention ## StableDiffusionPipelineOutput [[autodoc]] pipelines.stable_diffusion.StableDiffusionPipelineOutput
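For orientation, generating variations from an input image typically looks like the sketch below (an illustrative example, not part of the original reference page; exact parameters may differ across versions):

```python
import torch
from diffusers import StableDiffusionImageVariationPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers", torch_dtype=torch.float16
).to("cuda")

init_image = load_image("path/or/url/to/your/image.png")  # replace with your own image

# The pipeline is prompt-free: it conditions on the CLIP image embedding only.
variations = pipe(image=init_image, guidance_scale=3.0, num_images_per_prompt=4).images
for i, img in enumerate(variations):
    img.save(f"variation_{i}.png")
```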
diffusers/docs/source/en/api/pipelines/stable_diffusion/image_variation.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/stable_diffusion/image_variation.md", "repo_id": "diffusers", "token_count": 494 }
196
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Text2Video-Zero

[Text2Video-Zero: Text-to-Image Diffusion Models are Zero-Shot Video Generators](https://huggingface.co/papers/2303.13439) is by Levon Khachatryan, Andranik Movsisyan, Vahram Tadevosyan, Roberto Henschel, [Zhangyang Wang](https://www.ece.utexas.edu/people/faculty/atlas-wang), Shant Navasardyan, [Humphrey Shi](https://www.humphreyshi.com).

Text2Video-Zero enables zero-shot video generation using either:
1. A textual prompt
2. A prompt combined with guidance from poses or edges
3. Video Instruct-Pix2Pix (instruction-guided video editing)

Results are temporally consistent and closely follow the guidance and textual prompts.

![teaser-img](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/t2v_zero_teaser.png)

The abstract from the paper is:

*Recent text-to-video generation approaches rely on computationally heavy training and require large-scale video datasets. In this paper, we introduce a new task of zero-shot text-to-video generation and propose a low-cost approach (without any training or optimization) by leveraging the power of existing text-to-image synthesis methods (e.g., Stable Diffusion), making them suitable for the video domain. Our key modifications include (i) enriching the latent codes of the generated frames with motion dynamics to keep the global scene and the background time consistent; and (ii) reprogramming frame-level self-attention using a new cross-frame attention of each frame on the first frame, to preserve the context, appearance, and identity of the foreground object. Experiments show that this leads to low overhead, yet high-quality and remarkably consistent video generation. Moreover, our approach is not limited to text-to-video synthesis but is also applicable to other tasks such as conditional and content-specialized video generation, and Video Instruct-Pix2Pix, i.e., instruction-guided video editing. As experiments show, our method performs comparably or sometimes better than recent approaches, despite not being trained on additional video data.*

You can find additional information about Text2Video-Zero on the [project page](https://text2video-zero.github.io/), [paper](https://arxiv.org/abs/2303.13439), and [original codebase](https://github.com/Picsart-AI-Research/Text2Video-Zero).

## Usage example

### Text-To-Video

To generate a video from a prompt, run the following Python code:

```python
import imageio
import torch
from diffusers import TextToVideoZeroPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A panda is playing guitar on times square"
result = pipe(prompt=prompt).images
result = [(r * 255).astype("uint8") for r in result]
imageio.mimsave("video.mp4", result, fps=4)
```

You can change these parameters in the pipeline call:
* Motion field strength (see the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1):
    * `motion_field_strength_x` and `motion_field_strength_y`. Default: `motion_field_strength_x=12`, `motion_field_strength_y=12`
* `T` and `T'` (see the [paper](https://arxiv.org/abs/2303.13439), Sect. 3.3.1)
    * `t0` and `t1` in the range `{0, ..., num_inference_steps}`. Default: `t0=45`, `t1=48`
* Video length:
    * `video_length`, the number of frames to be generated. Default: `video_length=8`

We can also generate longer videos by doing the processing in a chunk-by-chunk manner:

```python
import imageio
import numpy as np
import torch
from diffusers import TextToVideoZeroPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
seed = 0
video_length = 24  # 24 ÷ 4fps = 6 seconds
chunk_size = 8
prompt = "A panda is playing guitar on times square"

# Generate the video chunk-by-chunk
result = []
chunk_ids = np.arange(0, video_length, chunk_size - 1)
generator = torch.Generator(device="cuda")
for i in range(len(chunk_ids)):
    print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
    ch_start = chunk_ids[i]
    ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
    # Attach the first frame for Cross Frame Attention
    frame_ids = [0] + list(range(ch_start, ch_end))
    # Fix the seed for the temporal consistency
    generator.manual_seed(seed)
    output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids)
    result.append(output.images[1:])

# Concatenate chunks and save
result = np.concatenate(result)
result = [(r * 255).astype("uint8") for r in result]
imageio.mimsave("video.mp4", result, fps=4)
```

- #### SDXL Support

In order to use the SDXL model when generating a video from a prompt, use the `TextToVideoZeroSDXLPipeline` pipeline:

```python
import torch
from diffusers import TextToVideoZeroSDXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
).to("cuda")
```

### Text-To-Video with Pose Control

To generate a video from a prompt with additional pose control:

1. Download a demo video

```python
from huggingface_hub import hf_hub_download

filename = "__assets__/poses_skeleton_gifs/dance1_corr.mp4"
repo_id = "PAIR/Text2Video-Zero"
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
```

2. Read the video containing the extracted pose images

```python
from PIL import Image
import imageio

reader = imageio.get_reader(video_path, "ffmpeg")
frame_count = 8
pose_images = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
```

To extract poses from an actual video, read the [ControlNet documentation](controlnet).

3. Run `StableDiffusionControlNetPipeline` with our custom attention processor

```python
import imageio
import torch
from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor

model_id = "runwayml/stable-diffusion-v1-5"
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    model_id, controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Set the attention processor
pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))

# fix latents for all frames
latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)

prompt = "Darth Vader dancing in a desert"
result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
imageio.mimsave("video.mp4", result, fps=4)
```

- #### SDXL Support

Since our attention processor also works with SDXL, it can be used to generate a video from a prompt using ControlNet models powered by SDXL:

```python
import imageio
import torch
from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel
from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor

controlnet_model_id = "thibaud/controlnet-openpose-sdxl-1.0"
model_id = "stabilityai/stable-diffusion-xl-base-1.0"

controlnet = ControlNetModel.from_pretrained(controlnet_model_id, torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    model_id, controlnet=controlnet, torch_dtype=torch.float16
).to("cuda")

# Set the attention processor
pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))
pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2))

# fix latents for all frames
latents = torch.randn((1, 4, 128, 128), device="cuda", dtype=torch.float16).repeat(len(pose_images), 1, 1, 1)

prompt = "Darth Vader dancing in a desert"
result = pipe(prompt=[prompt] * len(pose_images), image=pose_images, latents=latents).images
imageio.mimsave("video.mp4", result, fps=4)
```

### Text-To-Video with Edge Control

To generate a video from a prompt with additional Canny edge control, follow the same steps described above for pose-guided generation using the [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny).

### Video Instruct-Pix2Pix

To perform text-guided video editing (with [InstructPix2Pix](pix2pix)):

1. Download a demo video

```python
from huggingface_hub import hf_hub_download

filename = "__assets__/pix2pix video/camel.mp4"
repo_id = "PAIR/Text2Video-Zero"
video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename)
```

2. Read the video from the path

```python
from PIL import Image
import imageio

reader = imageio.get_reader(video_path, "ffmpeg")
frame_count = 8
video = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)]
```

3.
Run `StableDiffusionInstructPix2PixPipeline` with our custom attention processor ```python import torch from diffusers import StableDiffusionInstructPix2PixPipeline from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=3)) prompt = "make it Van Gogh Starry Night style" result = pipe(prompt=[prompt] * len(video), image=video).images imageio.mimsave("edited_video.mp4", result, fps=4) ``` ### DreamBooth specialization Methods **Text-To-Video**, **Text-To-Video with Pose Control** and **Text-To-Video with Edge Control** can run with custom [DreamBooth](../../training/dreambooth) models, as shown below for [Canny edge ControlNet model](https://huggingface.co/lllyasviel/sd-controlnet-canny) and [Avatar style DreamBooth](https://huggingface.co/PAIR/text2video-zero-controlnet-canny-avatar) model: 1. Download a demo video ```python from huggingface_hub import hf_hub_download filename = "__assets__/canny_videos_mp4/girl_turning.mp4" repo_id = "PAIR/Text2Video-Zero" video_path = hf_hub_download(repo_type="space", repo_id=repo_id, filename=filename) ``` 2. Read video from path ```python from PIL import Image import imageio reader = imageio.get_reader(video_path, "ffmpeg") frame_count = 8 canny_edges = [Image.fromarray(reader.get_data(i)) for i in range(frame_count)] ``` 3. Run `StableDiffusionControlNetPipeline` with custom trained DreamBooth model ```python import torch from diffusers import StableDiffusionControlNetPipeline, ControlNetModel from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_zero import CrossFrameAttnProcessor # set model id to custom model model_id = "PAIR/text2video-zero-controlnet-canny-avatar" controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16 ).to("cuda") # Set the attention processor pipe.unet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) pipe.controlnet.set_attn_processor(CrossFrameAttnProcessor(batch_size=2)) # fix latents for all frames latents = torch.randn((1, 4, 64, 64), device="cuda", dtype=torch.float16).repeat(len(canny_edges), 1, 1, 1) prompt = "oil painting of a beautiful girl avatar style" result = pipe(prompt=[prompt] * len(canny_edges), image=canny_edges, latents=latents).images imageio.mimsave("video.mp4", result, fps=4) ``` You can filter out some available DreamBooth-trained models with [this link](https://huggingface.co/models?search=dreambooth). <Tip> Make sure to check out the Schedulers [guide](../../using-diffusers/schedulers) to learn how to explore the tradeoff between scheduler speed and quality, and see the [reuse components across pipelines](../../using-diffusers/loading#reuse-components-across-pipelines) section to learn how to efficiently load the same components into multiple pipelines. </Tip> ## TextToVideoZeroPipeline [[autodoc]] TextToVideoZeroPipeline - all - __call__ ## TextToVideoZeroSDXLPipeline [[autodoc]] TextToVideoZeroSDXLPipeline - all - __call__ ## TextToVideoPipelineOutput [[autodoc]] pipelines.text_to_video_synthesis.pipeline_text_to_video_zero.TextToVideoPipelineOutput
diffusers/docs/source/en/api/pipelines/text_to_video_zero.md/0
{ "file_path": "diffusers/docs/source/en/api/pipelines/text_to_video_zero.md", "repo_id": "diffusers", "token_count": 4486 }
197
<!--Copyright 2024 The HuggingFace Team. All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
-->

# Reduce memory usage

A barrier to using diffusion models is the large amount of memory required. To overcome this challenge, there are several memory-reducing techniques you can use to run even some of the largest models on free-tier or consumer GPUs. Some of these techniques can even be combined to further reduce memory usage.

<Tip>

In many cases, optimizing for memory or speed leads to improved performance in the other, so you should try to optimize for both whenever you can. This guide focuses on minimizing memory usage, but you can also learn more about how to [Speed up inference](fp16).

</Tip>

The results below are obtained from generating a single 512x512 image from the prompt "a photo of an astronaut riding a horse on mars" with 50 DDIM steps on an Nvidia Titan RTX, demonstrating the speed-up you can expect as a result of reduced memory consumption.

|                            | latency | speed-up |
| -------------------------- | ------- | -------- |
| original                   | 9.50s   | x1       |
| fp16                       | 3.61s   | x2.63    |
| channels last              | 3.30s   | x2.88    |
| traced UNet                | 3.21s   | x2.96    |
| memory-efficient attention | 2.63s   | x3.61    |

## Sliced VAE

Sliced VAE enables decoding large batches of images with limited VRAM, or batches of 32 or more images, by decoding the batches of latents one image at a time. You'll likely want to couple this with [`~ModelMixin.enable_xformers_memory_efficient_attention`] to reduce memory use further if you have xFormers installed.

To use sliced VAE, call [`~StableDiffusionPipeline.enable_vae_slicing`] on your pipeline before inference:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe = pipe.to("cuda")

prompt = "a photo of an astronaut riding a horse on mars"
pipe.enable_vae_slicing()
#pipe.enable_xformers_memory_efficient_attention()
images = pipe([prompt] * 32).images
```

You may see a small performance boost in VAE decoding on multi-image batches, and there should be no performance impact on single-image batches.

## Tiled VAE

Tiled VAE processing also enables working with large images on limited VRAM (for example, generating 4k images on 8GB of VRAM) by splitting the image into overlapping tiles, decoding the tiles, and then blending the outputs together to compose the final image. You should also use tiled VAE with [`~ModelMixin.enable_xformers_memory_efficient_attention`] to reduce memory use further if you have xFormers installed.
To use tiled VAE processing, call [`~StableDiffusionPipeline.enable_vae_tiling`] on your pipeline before inference: ```python import torch from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe = pipe.to("cuda") prompt = "a beautiful landscape photograph" pipe.enable_vae_tiling() #pipe.enable_xformers_memory_efficient_attention() image = pipe([prompt], width=3840, height=2224, num_inference_steps=20).images[0] ``` The output image has some tile-to-tile tone variation because the tiles are decoded separately, but you shouldn't see any sharp and obvious seams between the tiles. Tiling is turned off for images that are 512x512 or smaller. ## CPU offloading Offloading the weights to the CPU and only loading them on the GPU when performing the forward pass can also save memory. Often, this technique can reduce memory consumption to less than 3GB. To perform CPU offloading, call [`~StableDiffusionPipeline.enable_sequential_cpu_offload`]: ```Python import torch from diffusers import StableDiffusionPipeline pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ) prompt = "a photo of an astronaut riding a horse on mars" pipe.enable_sequential_cpu_offload() image = pipe(prompt).images[0] ``` CPU offloading works on submodules rather than whole models. This is the best way to minimize memory consumption, but inference is much slower due to the iterative nature of the diffusion process. The UNet component of the pipeline runs several times (as many as `num_inference_steps`); each time, the different UNet submodules are sequentially onloaded and offloaded as needed, resulting in a large number of memory transfers. <Tip> Consider using [model offloading](#model-offloading) if you want to optimize for speed because it is much faster. The tradeoff is your memory savings won't be as large. </Tip> <Tip warning={true}> When using [`~StableDiffusionPipeline.enable_sequential_cpu_offload`], don't move the pipeline to CUDA beforehand or else the gain in memory consumption will only be minimal (see this [issue](https://github.com/huggingface/diffusers/issues/1934) for more information). [`~StableDiffusionPipeline.enable_sequential_cpu_offload`] is a stateful operation that installs hooks on the models. </Tip> ## Model offloading <Tip> Model offloading requires 🤗 Accelerate version 0.17.0 or higher. </Tip> [Sequential CPU offloading](#cpu-offloading) preserves a lot of memory but it makes inference slower because submodules are moved to GPU as needed, and they're immediately returned to the CPU when a new module runs. Full-model offloading is an alternative that moves whole models to the GPU, instead of handling each model's constituent *submodules*. There is a negligible impact on inference time (compared with moving the pipeline to `cuda`), and it still provides some memory savings. During model offloading, only one of the main components of the pipeline (typically the text encoder, UNet and VAE) is placed on the GPU while the others wait on the CPU. Components like the UNet that run for multiple iterations stay on the GPU until they're no longer needed. 
Enable model offloading by calling [`~StableDiffusionPipeline.enable_model_cpu_offload`] on the pipeline:

```python
import torch
from diffusers import StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
)

prompt = "a photo of an astronaut riding a horse on mars"
pipe.enable_model_cpu_offload()
image = pipe(prompt).images[0]
```

<Tip warning={true}>

In order to properly offload models after they're called, the entire pipeline must be run, and its models must be called in the pipeline's expected order. Exercise caution if models are reused outside the context of the pipeline after hooks have been installed. See [Removing Hooks](https://huggingface.co/docs/accelerate/en/package_reference/big_modeling#accelerate.hooks.remove_hook_from_module) for more information.

[`~StableDiffusionPipeline.enable_model_cpu_offload`] is a stateful operation that installs hooks on the models and state on the pipeline.

</Tip>

## Channels-last memory format

The channels-last memory format is an alternative way of ordering NCHW tensors in memory to preserve dimension ordering. Channels-last tensors are ordered in such a way that the channels become the densest dimension (storing images pixel-per-pixel). Since not all operators currently support the channels-last format, it may result in worse performance, but you should still try it and see if it works for your model.

For example, to set the pipeline's UNet to use the channels-last format:

```python
print(pipe.unet.conv_out.state_dict()["weight"].stride())  # (2880, 9, 3, 1)
pipe.unet.to(memory_format=torch.channels_last)  # in-place operation
print(
    pipe.unet.conv_out.state_dict()["weight"].stride()
)  # (2880, 1, 960, 320) having a stride of 1 for the 2nd dimension proves that it works
```

## Tracing

Tracing runs an example input tensor through the model and captures the operations that are performed on it as that input makes its way through the model's layers. The executable or `ScriptFunction` that is returned is optimized with just-in-time compilation.
To trace a UNet:

```python
import time
import torch
from diffusers import StableDiffusionPipeline
import functools

# torch disable grad
torch.set_grad_enabled(False)

# set variables
n_experiments = 2
unet_runs_per_experiment = 50


# load inputs
def generate_inputs():
    sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16)
    timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999
    encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16)
    return sample, timestep, encoder_hidden_states


pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")
unet = pipe.unet
unet.eval()
unet.to(memory_format=torch.channels_last)  # use channels_last memory format
unet.forward = functools.partial(unet.forward, return_dict=False)  # set return_dict=False as default

# warmup
for _ in range(3):
    with torch.inference_mode():
        inputs = generate_inputs()
        orig_output = unet(*inputs)

# trace
print("tracing..")
unet_traced = torch.jit.trace(unet, inputs)
unet_traced.eval()
print("done tracing")

# warmup and optimize graph
for _ in range(5):
    with torch.inference_mode():
        inputs = generate_inputs()
        orig_output = unet_traced(*inputs)

# benchmarking
with torch.inference_mode():
    for _ in range(n_experiments):
        torch.cuda.synchronize()
        start_time = time.time()
        for _ in range(unet_runs_per_experiment):
            orig_output = unet_traced(*inputs)
        torch.cuda.synchronize()
        print(f"unet traced inference took {time.time() - start_time:.2f} seconds")
    for _ in range(n_experiments):
        torch.cuda.synchronize()
        start_time = time.time()
        for _ in range(unet_runs_per_experiment):
            orig_output = unet(*inputs)
        torch.cuda.synchronize()
        print(f"unet inference took {time.time() - start_time:.2f} seconds")

# save the model
unet_traced.save("unet_traced.pt")
```

Replace the `unet` attribute of the pipeline with the traced model:

```python
from diffusers import StableDiffusionPipeline
import torch
from dataclasses import dataclass


@dataclass
class UNet2DConditionOutput:
    sample: torch.Tensor


pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

# use jitted unet
unet_traced = torch.jit.load("unet_traced.pt")


# del pipe.unet
class TracedUNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.in_channels = pipe.unet.config.in_channels
        self.device = pipe.unet.device

    def forward(self, latent_model_input, t, encoder_hidden_states):
        sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0]
        return UNet2DConditionOutput(sample=sample)


pipe.unet = TracedUNet()

prompt = "a photo of an astronaut riding a horse on mars"
with torch.inference_mode():
    image = pipe([prompt] * 1, num_inference_steps=50).images[0]
```

## Memory-efficient attention

Recent work on optimizing bandwidth in the attention block has generated huge speed-ups and reductions in GPU memory usage. The most recent type of memory-efficient attention is [Flash Attention](https://arxiv.org/abs/2205.14135) (you can check out the original code at [HazyResearch/flash-attention](https://github.com/HazyResearch/flash-attention)).

<Tip>

If you have PyTorch >= 2.0 installed, you should not expect a speed-up for inference when enabling `xformers`.
</Tip> To use Flash Attention, install the following: - PyTorch > 1.12 - CUDA available - [xFormers](xformers) Then call [`~ModelMixin.enable_xformers_memory_efficient_attention`] on the pipeline: ```python from diffusers import DiffusionPipeline import torch pipe = DiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True, ).to("cuda") pipe.enable_xformers_memory_efficient_attention() with torch.inference_mode(): sample = pipe("a small cat") # optional: You can disable it via # pipe.disable_xformers_memory_efficient_attention() ``` The iteration speed when using `xformers` should match the iteration speed of PyTorch 2.0 as described [here](torch2.0).
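
With PyTorch 2.0 or later, Diffusers dispatches attention to `torch.nn.functional.scaled_dot_product_attention` by default, so memory-efficient attention is typically available without any extra calls. As a minimal sketch (the model and prompt are only placeholders):

```python
import torch
from diffusers import DiffusionPipeline

# With PyTorch >= 2.0, Diffusers uses torch.nn.functional.scaled_dot_product_attention
# by default, so no call to enable_xformers_memory_efficient_attention() is needed.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    torch_dtype=torch.float16,
    use_safetensors=True,
).to("cuda")

with torch.inference_mode():
    image = pipe("a small cat").images[0]
```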
diffusers/docs/source/en/optimization/memory.md/0
{ "file_path": "diffusers/docs/source/en/optimization/memory.md", "repo_id": "diffusers", "token_count": 4133 }
198
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. --> # DreamBooth [DreamBooth](https://huggingface.co/papers/2208.12242) is a training technique that updates the entire diffusion model by training on just a few images of a subject or style. It works by associating a special word in the prompt with the example images. If you're training on a GPU with limited vRAM, you should try enabling the `gradient_checkpointing` and `mixed_precision` parameters in the training command. You can also reduce your memory footprint by using memory-efficient attention with [xFormers](../optimization/xformers). JAX/Flax training is also supported for efficient training on TPUs and GPUs, but it doesn't support gradient checkpointing or xFormers. You should have a GPU with >30GB of memory if you want to train faster with Flax. This guide will explore the [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) script to help you become more familiar with it, and how you can adapt it for your own use-case. Before running the script, make sure you install the library from source: ```bash git clone https://github.com/huggingface/diffusers cd diffusers pip install . ``` Navigate to the example folder with the training script and install the required dependencies for the script you're using: <hfoptions id="installation"> <hfoption id="PyTorch"> ```bash cd examples/dreambooth pip install -r requirements.txt ``` </hfoption> <hfoption id="Flax"> ```bash cd examples/dreambooth pip install -r requirements_flax.txt ``` </hfoption> </hfoptions> <Tip> 🤗 Accelerate is a library for helping you train on multiple GPUs/TPUs or with mixed-precision. It'll automatically configure your training setup based on your hardware and environment. Take a look at the 🤗 Accelerate [Quick tour](https://huggingface.co/docs/accelerate/quicktour) to learn more. </Tip> Initialize an 🤗 Accelerate environment: ```bash accelerate config ``` To setup a default 🤗 Accelerate environment without choosing any configurations: ```bash accelerate config default ``` Or if your environment doesn't support an interactive shell, like a notebook, you can use: ```py from accelerate.utils import write_basic_config write_basic_config() ``` Lastly, if you want to train a model on your own dataset, take a look at the [Create a dataset for training](create_dataset) guide to learn how to create a dataset that works with the training script. <Tip> The following sections highlight parts of the training script that are important for understanding how to modify it, but it doesn't cover every aspect of the script in detail. If you're interested in learning more, feel free to read through the [script](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) and let us know if you have any questions or concerns. </Tip> ## Script parameters <Tip warning={true}> DreamBooth is very sensitive to training hyperparameters, and it is easy to overfit. 
Read the [Training Stable Diffusion with Dreambooth using 🧨 Diffusers](https://huggingface.co/blog/dreambooth) blog post for recommended settings for different subjects to help you choose the appropriate hyperparameters. </Tip> The training script offers many parameters for customizing your training run. All of the parameters and their descriptions are found in the [`parse_args()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L228) function. The parameters are set with default values that should work pretty well out-of-the-box, but you can also set your own values in the training command if you'd like. For example, to train in the bf16 format: ```bash accelerate launch train_dreambooth.py \ --mixed_precision="bf16" ``` Some basic and important parameters to know and specify are: - `--pretrained_model_name_or_path`: the name of the model on the Hub or a local path to the pretrained model - `--instance_data_dir`: path to a folder containing the training dataset (example images) - `--instance_prompt`: the text prompt that contains the special word for the example images - `--train_text_encoder`: whether to also train the text encoder - `--output_dir`: where to save the trained model - `--push_to_hub`: whether to push the trained model to the Hub - `--checkpointing_steps`: frequency of saving a checkpoint as the model trains; this is useful if for some reason training is interrupted, you can continue training from that checkpoint by adding `--resume_from_checkpoint` to your training command ### Min-SNR weighting The [Min-SNR](https://huggingface.co/papers/2303.09556) weighting strategy can help with training by rebalancing the loss to achieve faster convergence. The training script supports predicting `epsilon` (noise) or `v_prediction`, but Min-SNR is compatible with both prediction types. This weighting strategy is only supported by PyTorch and is unavailable in the Flax training script. Add the `--snr_gamma` parameter and set it to the recommended value of 5.0: ```bash accelerate launch train_dreambooth.py \ --snr_gamma=5.0 ``` ### Prior preservation loss Prior preservation loss is a method that uses a model's own generated samples to help it learn how to generate more diverse images. Because these generated sample images belong to the same class as the images you provided, they help the model retain what it has learned about the class and how it can use what it already knows about the class to make new compositions. - `--with_prior_preservation`: whether to use prior preservation loss - `--prior_loss_weight`: controls the influence of the prior preservation loss on the model - `--class_data_dir`: path to a folder containing the generated class sample images - `--class_prompt`: the text prompt describing the class of the generated sample images ```bash accelerate launch train_dreambooth.py \ --with_prior_preservation \ --prior_loss_weight=1.0 \ --class_data_dir="path/to/class/images" \ --class_prompt="text prompt describing class" ``` ### Train text encoder To improve the quality of the generated outputs, you can also train the text encoder in addition to the UNet. This requires additional memory and you'll need a GPU with at least 24GB of vRAM. If you have the necessary hardware, then training the text encoder produces better results, especially when generating images of faces. 
Enable this option by: ```bash accelerate launch train_dreambooth.py \ --train_text_encoder ``` ## Training script DreamBooth comes with its own dataset classes: - [`DreamBoothDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L604): preprocesses the images and class images, and tokenizes the prompts for training - [`PromptDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L738): generates the prompt embeddings to generate the class images If you enabled [prior preservation loss](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L842), the class images are generated here: ```py sample_dataset = PromptDataset(args.class_prompt, num_new_images) sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size) sample_dataloader = accelerator.prepare(sample_dataloader) pipeline.to(accelerator.device) for example in tqdm( sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process ): images = pipeline(example["prompt"]).images ``` Next is the [`main()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L799) function which handles setting up the dataset for training and the training loop itself. The script loads the [tokenizer](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L898), [scheduler and models](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L912C1-L912C1): ```py # Load the tokenizer if args.tokenizer_name: tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False) elif args.pretrained_model_name_or_path: tokenizer = AutoTokenizer.from_pretrained( args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, use_fast=False, ) # Load scheduler and models noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler") text_encoder = text_encoder_cls.from_pretrained( args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision ) if model_has_vae(args): vae = AutoencoderKL.from_pretrained( args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision ) else: vae = None unet = UNet2DConditionModel.from_pretrained( args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision ) ``` Then, it's time to [create the training dataset](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1073) and DataLoader from `DreamBoothDataset`: ```py train_dataset = DreamBoothDataset( instance_data_root=args.instance_data_dir, instance_prompt=args.instance_prompt, class_data_root=args.class_data_dir if args.with_prior_preservation else None, class_prompt=args.class_prompt, class_num=args.num_class_images, tokenizer=tokenizer, size=args.resolution, center_crop=args.center_crop, encoder_hidden_states=pre_computed_encoder_hidden_states, class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states, tokenizer_max_length=args.tokenizer_max_length, ) train_dataloader = torch.utils.data.DataLoader( train_dataset, 
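    # collate_fn packs instance examples (and class examples when prior preservation is enabled) into a single batch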
batch_size=args.train_batch_size, shuffle=True, collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation), num_workers=args.dataloader_num_workers, ) ``` Lastly, the [training loop](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1151) takes care of the remaining steps such as converting images to latent space, adding noise to the input, predicting the noise residual, and calculating the loss. If you want to learn more about how the training loop works, check out the [Understanding pipelines, models and schedulers](../using-diffusers/write_own_pipeline) tutorial which breaks down the basic pattern of the denoising process. ## Launch the script You're now ready to launch the training script! 🚀 For this guide, you'll download some images of a [dog](https://huggingface.co/datasets/diffusers/dog-example) and store them in a directory. But remember, you can create and use your own dataset if you want (see the [Create a dataset for training](create_dataset) guide). ```py from huggingface_hub import snapshot_download local_dir = "./dog" snapshot_download( "diffusers/dog-example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes", ) ``` Set the environment variable `MODEL_NAME` to a model id on the Hub or a path to a local model, `INSTANCE_DIR` to the path where you just downloaded the dog images to, and `OUTPUT_DIR` to where you want to save the model. You'll use `sks` as the special word to tie the training to. If you're interested in following along with the training process, you can periodically save generated images as training progresses. Add the following parameters to the training command: ```bash --validation_prompt="a photo of a sks dog" --num_validation_images=4 --validation_steps=100 ``` One more thing before you launch the script! Depending on the GPU you have, you may need to enable certain optimizations to train DreamBooth. <hfoptions id="gpu-select"> <hfoption id="16GB"> On a 16GB GPU, you can use bitsandbytes 8-bit optimizer and gradient checkpointing to help you train a DreamBooth model. Install bitsandbytes: ```py pip install bitsandbytes ``` Then, add the following parameter to your training command: ```bash accelerate launch train_dreambooth.py \ --gradient_checkpointing \ --use_8bit_adam \ ``` </hfoption> <hfoption id="12GB"> On a 12GB GPU, you'll need bitsandbytes 8-bit optimizer, gradient checkpointing, xFormers, and set the gradients to `None` instead of zero to reduce your memory-usage. ```bash accelerate launch train_dreambooth.py \ --use_8bit_adam \ --gradient_checkpointing \ --enable_xformers_memory_efficient_attention \ --set_grads_to_none \ ``` </hfoption> <hfoption id="8GB"> On a 8GB GPU, you'll need [DeepSpeed](https://www.deepspeed.ai/) to offload some of the tensors from the vRAM to either the CPU or NVME to allow training with less GPU memory. Run the following command to configure your 🤗 Accelerate environment: ```bash accelerate config ``` During configuration, confirm that you want to use DeepSpeed. Now it should be possible to train on under 8GB vRAM by combining DeepSpeed stage 2, fp16 mixed precision, and offloading the model parameters and the optimizer state to the CPU. The drawback is that this requires more system RAM (~25 GB). See the [DeepSpeed documentation](https://huggingface.co/docs/accelerate/usage_guides/deepspeed) for more configuration options. 
You should also change the default Adam optimizer to DeepSpeed’s optimized version of Adam [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu) for a substantial speedup. Enabling `DeepSpeedCPUAdam` requires your system’s CUDA toolchain version to be the same as the one installed with PyTorch. bitsandbytes 8-bit optimizers don’t seem to be compatible with DeepSpeed at the moment. That's it! You don't need to add any additional parameters to your training command. </hfoption> </hfoptions> <hfoptions id="training-inference"> <hfoption id="PyTorch"> ```bash export MODEL_NAME="runwayml/stable-diffusion-v1-5" export INSTANCE_DIR="./dog" export OUTPUT_DIR="path_to_saved_model" accelerate launch train_dreambooth.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --gradient_accumulation_steps=1 \ --learning_rate=5e-6 \ --lr_scheduler="constant" \ --lr_warmup_steps=0 \ --max_train_steps=400 \ --push_to_hub ``` </hfoption> <hfoption id="Flax"> ```bash export MODEL_NAME="duongna/stable-diffusion-v1-4-flax" export INSTANCE_DIR="./dog" export OUTPUT_DIR="path-to-save-model" python train_dreambooth_flax.py \ --pretrained_model_name_or_path=$MODEL_NAME \ --instance_data_dir=$INSTANCE_DIR \ --output_dir=$OUTPUT_DIR \ --instance_prompt="a photo of sks dog" \ --resolution=512 \ --train_batch_size=1 \ --learning_rate=5e-6 \ --max_train_steps=400 \ --push_to_hub ``` </hfoption> </hfoptions> Once training is complete, you can use your newly trained model for inference! <Tip> Can't wait to try your model for inference before training is complete? 🤭 Make sure you have the latest version of 🤗 Accelerate installed. 
```py
from diffusers import DiffusionPipeline, UNet2DConditionModel
from transformers import CLIPTextModel
import torch

unet = UNet2DConditionModel.from_pretrained("path/to/model/checkpoint-100/unet")

# if you have trained with `--train_text_encoder` make sure to also load the text encoder
text_encoder = CLIPTextModel.from_pretrained("path/to/model/checkpoint-100/text_encoder")

pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", unet=unet, text_encoder=text_encoder, torch_dtype=torch.float16
).to("cuda")

image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```

</Tip>

<hfoptions id="training-inference">
<hfoption id="PyTorch">

```py
from diffusers import DiffusionPipeline
import torch

pipeline = DiffusionPipeline.from_pretrained("path_to_saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```

</hfoption>
<hfoption id="Flax">

```py
import jax
import numpy as np
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path-to-your-trained-model", dtype=jax.numpy.bfloat16)

prompt = "A photo of sks dog in a bucket"
prng_seed = jax.random.PRNGKey(0)
num_inference_steps = 50

num_samples = jax.device_count()
prompt = num_samples * [prompt]
prompt_ids = pipeline.prepare_inputs(prompt)

# shard inputs and rng
params = replicate(params)
prng_seed = jax.random.split(prng_seed, jax.device_count())
prompt_ids = shard(prompt_ids)

images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
images[0].save("dog-bucket.png")
```

</hfoption>
</hfoptions>

## LoRA

LoRA is a training technique for significantly reducing the number of trainable parameters. As a result, training is faster and it is easier to store the resulting weights because they are a lot smaller (~100MB). Use the [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) script to train with LoRA.

The LoRA training script is discussed in more detail in the [LoRA training](lora) guide.

## Stable Diffusion XL

Stable Diffusion XL (SDXL) is a powerful text-to-image model that generates high-resolution images, and it adds a second text-encoder to its architecture. Use the [train_dreambooth_lora_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_sdxl.py) script to train an SDXL model with LoRA.

The SDXL training script is discussed in more detail in the [SDXL training](sdxl) guide.

## Next steps

Congratulations on training your DreamBooth model! To learn more about how to use your new model, the following guide may be helpful:

- Learn how to [load a DreamBooth](../using-diffusers/loading_adapters) model for inference if you trained your model with LoRA (a minimal loading sketch follows below).
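
If you trained with the LoRA script, attaching the adapter to the base pipeline for inference might look like the sketch below; `path_to_lora_output` is a placeholder for wherever you saved the LoRA weights:

```py
import torch
from diffusers import DiffusionPipeline

# Load the base model, then attach the DreamBooth LoRA weights on top.
# "path_to_lora_output" is a placeholder for your LoRA training output directory.
pipeline = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
).to("cuda")
pipeline.load_lora_weights("path_to_lora_output")

image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
image.save("dog-bucket.png")
```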
diffusers/docs/source/en/training/dreambooth.md/0
{ "file_path": "diffusers/docs/source/en/training/dreambooth.md", "repo_id": "diffusers", "token_count": 6160 }
199